# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:PythonAdv]
#     language: python
#     name: conda-env-PythonAdv-py
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam

# +
# Bring in train and test data
X_train = np.load('./Data/X_train.npy')
X_test = np.load('./Data/X_test.npy')
y_train = np.load('./Data/y_train.npy')
y_test = np.load('./Data/y_test.npy')

# +
# Set dimensions of images, 1 for one layer
im_shape = (360, 480, 1)

X_train = X_train.reshape(X_train.shape[0], *im_shape)
X_test = X_test.reshape(X_test.shape[0], *im_shape)
# -

# ### Build CNN model using AlexNet architecture

cnn_model = Sequential([
    Conv2D(filters=96, kernel_size=(13, 13), strides=4, input_shape=im_shape),
    BatchNormalization(axis=3),
Activation('relu'), MaxPooling2D(pool_size=(3, 3), strides=2), Dropout(0.2), Conv2D(filters=256, kernel_size=(7, 7), padding='same'), BatchNormalization(axis=3), Activation('relu'), MaxPooling2D(pool_size=(3, 3), strides=2), Dropout(0.2), Conv2D(filters=384, kernel_size=(3, 3), padding='same'), BatchNormalization(axis=3), Activation('relu'), Dropout(0.2), Conv2D(filters=384, kernel_size=(3, 3), padding='same'), BatchNormalization(axis=3), Activation('relu'), Dropout(0.2), Conv2D(filters=256, kernel_size=(3, 3), padding='same'), BatchNormalization(axis=3), Activation('relu'), Dropout(0.2), MaxPooling2D(pool_size=(3, 3), strides=2), Flatten(), Dense(4096, activation='relu'), Dense(4096, activation='relu'), Dense(2, activation='softmax') ]) cnn_model.compile( loss='sparse_categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'] ) cnn_model.fit( X_train, y_train, batch_size=100, epochs=46, verbose=1, validation_data=(X_test, y_test) ) score = cnn_model.evaluate(X_test, y_test, verbose=2) print(f"Loss: {score[0]}, Accuracy: {score[1]}") cnn_model.save('cnn_model.h5') try: import openmdao.api as om import dymos as dm except ImportError: # !python -m pip install openmdao[notebooks] # !python -m pip install dymos[docs] import openmdao.api as om import dymos as dm # # The Brachistochrone with Externally-Sourced Controls # # ```{admonition} Things you'll learn through this example # - How to provide trajectory control values from an external source. # ``` # # This example is the same as the other brachistochrone example with one exception: the control values come from an external source upstream of the trajectory. # # The following script fully defines the brachistochrone problem with Dymos and solves it. # A new `IndepVarComp` is added before the trajectory. # The transcription used in the relevant phase is defined first so that we can obtain the number of control input nodes. # The IndepVarComp then provides the control $\theta$ at the correct number of nodes, and sends them to the trajectory. # Since the control values are no longer managed by Dymos, they are added as design variables using the OpenMDAO `add_design_var` method. om.display_source("dymos.examples.brachistochrone.doc.brachistochrone_ode") # + import numpy as np import openmdao.api as om import dymos as dm import matplotlib.pyplot as plt from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE # # Define the OpenMDAO problem # p = om.Problem(model=om.Group()) # Instantiate the transcription so we can get the number of nodes from it while # building the problem. tx = dm.GaussLobatto(num_segments=10, order=3) # Add an indep var comp to provide the external control values ivc = p.model.add_subsystem('control_ivc', om.IndepVarComp(), promotes_outputs=['*']) # Add the output to provide the values of theta at the control input nodes of the transcription. ivc.add_output('theta', shape=(tx.grid_data.subset_num_nodes['control_input']), units='rad') # Add this external control as a design variable p.model.add_design_var('theta', units='rad', lower=1.0E-5, upper=np.pi) # Connect this to controls:theta in the appropriate phase. # connect calls are cached, so we can do this before we actually add the trajectory to the problem. 
p.model.connect('theta', 'traj.phase0.controls:theta')

#
# Define a Trajectory object
#
traj = dm.Trajectory()

p.model.add_subsystem('traj', subsys=traj)

#
# Define a Dymos Phase object with GaussLobatto Transcription
#
phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)

traj.add_phase(name='phase0', phase=phase)

#
# Set the time options
# Time has no targets in our ODE.
# We fix the initial time so that it is not a design variable in the optimization.
# The duration of the phase is allowed to be optimized, but is bounded on [0.5, 10].
#
phase.set_time_options(fix_initial=True, duration_bounds=(0.5, 10.0), units='s')

#
# Set the state options
# Initial values of positions and velocity are all fixed.
# The final values of position are fixed, but the final velocity is a free variable.
# The equations of motion are not functions of position, so 'x' and 'y' have no targets.
# The rate source points to the output in the ODE which provides the time derivative of the
# given state.
phase.add_state('x', fix_initial=True, fix_final=True, units='m', rate_source='xdot')
phase.add_state('y', fix_initial=True, fix_final=True, units='m', rate_source='ydot')
phase.add_state('v', fix_initial=True, fix_final=False, units='m/s', rate_source='vdot', targets=['v'])

# Define theta as a control.
# Use opt=False to allow it to be connected to an external source.
# Arguments lower and upper are no longer valid for an input control.
phase.add_control(name='theta', targets=['theta'], opt=False)

# Minimize final time.
phase.add_objective('time', loc='final')

# Set the driver.
p.driver = om.ScipyOptimizeDriver()

# Allow OpenMDAO to automatically determine our sparsity pattern.
# Doing so can significantly speed up the execution of Dymos.
p.driver.declare_coloring()

# Set up the problem
p.setup(check=True)

# Now that the OpenMDAO problem is set up, we can set the values of the states and controls.
p.set_val('traj.phase0.states:x', phase.interp('x', [0, 10]), units='m')
p.set_val('traj.phase0.states:y', phase.interp('y', [10, 5]), units='m')
p.set_val('traj.phase0.states:v', phase.interp('v', [0, 5]), units='m/s')
p.set_val('traj.phase0.controls:theta', phase.interp('theta', [90, 90]), units='deg')

# Run the driver to solve the problem
p.run_driver()

# Test the results
print(p.get_val('traj.phase0.timeseries.time')[-1])

# Check the validity of our results by using scipy.integrate.solve_ivp to
# integrate the solution.
sim_out = traj.simulate() # Plot the results fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4.5)) axes[0].plot(p.get_val('traj.phase0.timeseries.states:x'), p.get_val('traj.phase0.timeseries.states:y'), 'ro', label='solution') axes[0].plot(sim_out.get_val('traj.phase0.timeseries.states:x'), sim_out.get_val('traj.phase0.timeseries.states:y'), 'b-', label='simulation') axes[0].set_xlabel('x (m)') axes[0].set_ylabel('y (m/s)') axes[0].legend() axes[0].grid() axes[1].plot(p.get_val('traj.phase0.timeseries.time'), p.get_val('traj.phase0.timeseries.controls:theta', units='deg'), 'ro', label='solution') axes[1].plot(sim_out.get_val('traj.phase0.timeseries.time'), sim_out.get_val('traj.phase0.timeseries.controls:theta', units='deg'), 'b-', label='simulation') axes[1].set_xlabel('time (s)') axes[1].set_ylabel(r'$\theta$ (deg)') axes[1].legend() axes[1].grid() plt.show() # + from openmdao.utils.assert_utils import assert_near_equal assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # ## functions for doing the work # # For a given `filename`, `colnum` and `rank` create the desired matrix, and possibly save it in a comma separate value format that is readable by excel, origin, etc. # + import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable def create_matrix(filename, colnum, rank, sep=':', savecsv=False): df = pd.read_csv(filename, sep=sep, header=None, comment='#') matrix = df.iloc[:, [colnum]].values.reshape(rank, rank) if savecsv: mname = os.path.splitext(os.path.basename(filename))[0] + '_col-{0:d}'.format(colnum) + '.csv' np.savetxt(mname, matrix) return matrix def heatmap(matrix, size, cmap='viridis', show_colorbar=True): ''' Args: matrix : array_like Matrix to be colormapped size : int Size of the plot in inches ''' plt.figure(figsize=(size, size)) plt.pcolor(m, cmap=cmap) ax = plt.gca() ax.set_aspect('equal') if show_colorbar: divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.2) plt.colorbar(cax=cax) plt.tight_layout() plt.show() # - # ## create the matrix with a specified rank from a given column m = create_matrix('data/result_slice01.txt', colnum=1, rank=58) # # plot the heatmap # # Other possible heatmaps are listed in [matplotlib documentation](https://matplotlib.org/examples/color/colormaps_reference.html) heatmap(m, 10) # ## different colum and colormap m = create_matrix('data/result_slice01.txt', colnum=6, rank=58) heatmap(m, 10, cmap='Greens_r') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from keras.models import load_model import numpy as np import cv2 as cv import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D, activations from keras.utils import np_utils from keras import backend as K K.set_image_dim_ordering('th') import numpy as np import pandas as pd from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score 
import os from imutils import paths import glob Data = np.load('Data.npy') Label = pd.read_csv('Label.csv') classes = pd.read_csv('Class.csv') Data.shape Label.shape print(classes.shape) X_train, X_test, y_train, y_test = train_test_split(Data/255.,classes,test_size=0.1,random_state=0) y_train_cnn = np_utils.to_categorical(y_train) y_test_cnn = np_utils.to_categorical(y_test) num_classes = y_test_cnn.shape[1] X_train_cnn = X_train.reshape(X_train.shape[0], 3, 256, 256).astype('float32') X_test_cnn = X_test.reshape(X_test.shape[0], 3, 256, 256).astype('float32') X_train_cnn.shape X_test_cnn.shape def cnn_model(): model = Sequential() model.add(Conv2D(32, (3, 3), input_shape=(3, 256, 256), activation = 'relu')) model.add(Conv2D(32, (3, 3), activation = 'relu')) model.add(MaxPooling2D(pool_size=(2, 2))) # the CONV CONV POOL structure is popularized in during ImageNet 2014 model.add(Dropout(0.25)) # this thing called dropout is used to prevent overfitting model.add(Conv2D(64, (3, 3), activation = 'relu')) model.add(Conv2D(64, (3, 3), activation = 'relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(4, activation= 'softmax')) optimizer = keras.optimizers.Adam() model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) return model print("[INFO] creating model...") model = cnn_model() # Fit the model print("[INFO] training model...") records = model.fit(X_train_cnn, y_train_cnn, validation_split=0.1, epochs=25, batch_size=16) # Final evaluation of the model print("[INFO] evaluating model...") scores = model.evaluate(X_test_cnn, y_test_cnn, verbose=0) print('Final CNN accuracy: ', scores[1]) print("[INFO] saving model...") model.save("model3.h5") # + import matplotlib.pyplot as plt cnn_probab = model.predict(X_test_cnn, batch_size=32, verbose=0) # extract the probability for the label that was predicted: p_max = np.amax(cnn_probab, axis=1) plt.hist(p_max, normed=True, bins=list(np.linspace(0,1,11))); plt.xlabel('p of predicted class'); # - N = 25 plt.style.use("ggplot") plt.figure() plt.plot(np.arange(0, N), records.history["loss"], label="train_loss") plt.plot(np.arange(0, N), records.history["val_loss"], label="val_loss") plt.plot(np.arange(0, N), records.history["acc"], label="train_acc") plt.plot(np.arange(0, N), records.history["val_acc"], label="val_acc") plt.title("Training Loss and Accuracy") plt.xlabel("Epoch #") plt.ylabel("Loss/Accuracy") plt.legend(loc="lower left") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # + active="" # # Linear Regression - Target variable is Continuous variable # # Logistic Regression - Target variable is Categorical Variable # # Classification of Categorical variables - Binary(High/Low Risk), Multiclass(High,Med,Low Risk) # # Sigmoid or Logit Function returns Y or N # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linechart Example # + from fractions import Fraction import numpy as np from pewdieplot.graph.linechart import LineChart # - # Create some data using sine, cosine and tangent curves. 
x = np.arange(0, 2 * np.pi, 0.15) sin_y = np.sin(x) sin_points = np.dstack((x, sin_y)) cos_y = np.cos(x) cos_points = np.dstack((x, cos_y)) tan_y = np.tan(x) tan_points = np.dstack((x, tan_y)) all_points = np.concatenate((sin_points, cos_points, tan_points)) # Plot the curves defined above. # Additionally use a PyPlot function call to draw an arrow indicating the local maximum of the sine curve. # Build and show Line Chart. (LineChart('Line Chart Example', size=(12, 7)) .xlim(0, 2 * np.pi) .ylim(-4, 4) .xticks(np.arange(0, 2 * np.pi + 0.1, 0.5 * np.pi)) .yticks(np.arange(-5, 5, 1)) .xlabel_fn(lambda n: r'$%s\pi$' % Fraction(n / np.pi)) .legends(['Sin', 'Cos', 'Tan']) .data(all_points) .pyplot('annotate', 'Sine Max', xy=(0.5 * np.pi, 1), xytext=(2.5, 2), arrowprops=dict(facecolor='black', shrink=0.05)) ).show() pass # Supress object output in notebook. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="KZiWxbWlTzXt" # # TimesML # --- # TimesML 是為時間序列數據分析和機器學習而開發的,此套件目的是提供API給開發人員與資料科學家,能夠輕鬆的對時間序列數據建立模型,進行分析與預測。我們計畫未來提供更多機器學習模型,感謝您的支持。 # # PyPI: https://pypi.org/project/TimesML/ # # Github: https://github.com/leodflag/TimesML # #. # + [markdown] id="jcERQmOtTqwb" # ## 下載 TimesML # + id="i0xEf0ZtRWyT" colab={"base_uri": "https://localhost:8080/"} outputId="1fb2d65d-5a10-4a32-d827-24a689e8cd16" # !pip install TimesML # + [markdown] id="AiqlKLZkVLrp" # 我們先介紹套件TimeSeriesAnalysis底下的三個模組 # # ### ProcessData # 用來處理時間序列數據,包括取得、儲存資料,也有用來建立Model所需資料格式的函式。 # # ### Model # 內含各種時間序列的分析預測模型,如能觀察股票價格整體性趨勢的簡單移動平均,與利用序列資料自身的規律以預測未來的自迴歸模型等等。 # # ### Chart # 可將時間序列數據視覺化,讓人直觀的了解數據的歷史走勢、與自身前期資料的關係,以及模型預測後的視覺化結果等等。 # # . # + [markdown] id="hEuyJBK_cXRm" # # 開始使用TimesML # --- # 我們以2020/1到2020/10的20個國家COVID19每日新增確診人數資料為分析目標,一步步了解TimesML的用法。 # # ### Note # 若是使用自己電腦實作,所需資料網址如下,請自行下載 # # https://github.com/leodflag/TimesML/blob/master/test_data/g20_new_c.csv # # # --- # # * Test data set : g20_new_c.csv # ** , , . An interactive web-based dashboard to track COVID-19 in real time. Lancet Inf Dis. 20(5):533-534. doi: 10.1016/S1473-3099(20)30120-1 # #. # + [markdown] id="6uGJJTWiR9vN" # ## import 模組 # + id="_VndvdHkVMOJ" import TimeSeriesAnalysis.ProcessData as Data import TimeSeriesAnalysis.Model as Model import TimeSeriesAnalysis.Chart as Chart # + [markdown] id="TYRBxBsLbCrR" # ## 基礎設定 # + id="tZLKbVWQbM7H" save_path = 'US' # 存檔路徑 chart = Chart.chart('US') # 建立物件chart, 輸入的文字為時間序列數據名稱,之後畫的圖都會有此標題 # + [markdown] id="MTOEP-17cPKO" # ## 讀檔 # + id="glyJ2HhJfJL0" data = Data.read_file(path="https://raw.githubusercontent.com/leodflag/TimesML/master/test_data/g20_new_c.csv", col_name='US') # + [markdown] id="Y9LccjCQffs-" # 這邊就使用到了ProcessData的讀檔函式,path為目標檔案路徑,col_name是欄位名稱,因此從檔案取得的格式會是一維數據。 # + colab={"base_uri": "https://localhost:8080/"} id="Mq5SxSeSiBqQ" outputId="919f1554-4034-4f50-d9d9-4f2ab3e13239" print(data.head) # 印出前後5筆 # + [markdown] id="V6fpLUq3fS7z" # ## Chart畫圖: 歷史趨勢線圖 # # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="GefPHE-vf6C4" outputId="cddc5401-8bcb-4f99-842d-c5c3856e42a0" chart.historocal_trend_line_chart(data, save_path, xlabel='date', ylabel='population') # save_path:所有的圖都會自動儲存到指定路徑, xlabel, ylabel:x軸、y軸的標籤 # + [markdown] id="LEVDnJXMjOdy" # 可以看到美國的COVID19每日新增確診人數。 # # #. 
# ## Chart 小知識 # # Chart在建立物件時會固定一張圖的輸出尺寸,因此圖的尺寸不設定的話會使用預設值。 # # 原本建立Chart物件時的參數如下: # # chart(title_times_data: str='times_data', figsize_length: int=9, figsize_width: int=6, # fontsize_title: int=15, fontsize_x_y: int=8, fontsize_x_y_label: int=11) # # . # + [markdown] id="d2TEgTLhsm9m" # ## 來個統計分析圖 # 觀察時間序列數據的性質 # + id="qrX8Wz-njNkS" colab={"base_uri": "https://localhost:8080/", "height": 485} outputId="f711e2cc-fab8-4a12-8ca4-dbbaef1145e6" chart.statistics_infographic(data, file_path=save_path, lags=12, xlabel='date', ylabel='population') # lags為落後期數(白話是前幾期),在此圖功能為提供ACF計算期數 # + [markdown] id="LNLmvzMqoyC0" # ## Lag Plot # # 原本是散佈圖,可以用來觀察自變數x與應變數y的關係,但Lag Plot這張圖專門來看時間序列數據當期與前一期數據之間的關係, # # y(t):當期,y(t-1):前一期 # # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="T5omfe5yqafO" outputId="92f1366e-aafe-4dd3-8dad-0e4107aa5401" chart.lag_plot(data, save_path) # + [markdown] id="QhagepXN0lhE" # 圖中的 $r$ 為相關係數,可以了解兩變數之間的關係。 # # . # # # + [markdown] id="vVIP1oUH237I" # ## 當期與前一期數據是啥? # + colab={"base_uri": "https://localhost:8080/"} id="AwskZDwn3qDT" outputId="987a72e9-16c8-42b5-bfb4-b1a235535db7" lag_data = Data.create_ar_data(data, lags=2) print(lag_data.head) # + [markdown] id="sladRDgI4uwr" # 欄位t是指date當期(當天)的資料,欄位t-1是data前一期(落後一期、前一天)的資料,因此以date所在的橫列來看,會存在當期與前一期的資料,此種資料設計方便運算 # # # + colab={"base_uri": "https://localhost:8080/"} id="KlxmZIF_71qS" outputId="92ae703e-b7cf-4629-e951-47df5ac29eec" print(lag_data['t-1'].head) # + [markdown] id="4nY0zPw80S7F" # ## 介紹Math.Statistics # ##### . # ### **注意事項:數學符號並未統一** # + id="vLgiqZxnxJ6X" import Math.Statistics as math # + [markdown] id="29l24rsdxKYt" # ## Pearson product-moment correlation coefficient 皮爾森相關係數 # [表示兩變數之間的相關程度,其值介於-1與1之間,定義為兩個變數的共變異數除以它們標準差的乘積](https://zh.wikipedia.org/wiki/%E7%9A%AE%E5%B0%94%E9%80%8A%E7%A7%AF%E7%9F%A9%E7%9B%B8%E5%85%B3%E7%B3%BB%E6%95%B0) # # ![image](https://wikimedia.org/api/rest_v1/media/math/render/svg/8b0d0608b5f85d24a9c572f8d1b5769289664dfb) # # * 樣本皮爾森相關係數 # $$r = \frac{ n(\sum_{i=0}^{n} x_{i}y_{i})-(\sum_{i=0}^{n} x_{i})(\sum_{i=0}^{n} y_{i})}{\sqrt {[n(\sum_{i=0}^{n} x_{i}^{2})-(\sum_{i=0}^{n} x_{i})^{2}][n(\sum_{i=0}^{n} y_{i}^{2})-(\sum_{i=0}^{n} y_{i})^{2}] }}$$ # + colab={"base_uri": "https://localhost:8080/"} id="RDbFoIlg1vP3" outputId="d102b5d1-474e-4545-edae-2e174b0e60b0" math.correlation_coefficient(lag_data['t'], lag_data['t-1']) # + [markdown] id="wZ1omNRHDrAx" # ## ACF chart # 透過math.ACF()不斷計算當期數據與遞延期數數據之間的ACF,可以畫出以下的圖,x軸為遞延期數,y軸為ACF數值,以此張圖來看,US的COVID19每日新增確診人數與自身的前期新增確診人數皆具有高度相關。 # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="9I5ioBsmDGiZ" outputId="32cf3d78-2f80-4213-d921-c149e22dc4f4" chart.ACF_chart(data, file_path=save_path, lags=10) # + [markdown] id="HOZDGMNg-Zvx" # ## ACF 自相關函數 # [也叫序列相關,是一個訊號於其自身在不同時間點的互相關,常用於訊號處理中,用來分析函數或一系列值,如時域訊號。](https://zh.wikipedia.org/wiki/%E8%87%AA%E7%9B%B8%E5%85%B3%E5%87%BD%E6%95%B0) # # $$ \rho_k = \frac{Cov(X_t, X_{t-k})} {\sigma_{X_t}{\sigma_{X_{t-k}}} } $$ # + colab={"base_uri": "https://localhost:8080/"} id="QW5vNSggD9go" outputId="d4be31e4-f550-4514-d69f-2395497a0f08" acf_lag_data = math.ACF(lag_data['t'], lag_data['t-1']) print(acf_lag_data) # + [markdown] id="1N8g4oAZg0gm" # ## 時間序列數據的另一個觀察方法:簡單移動平均 # [簡單移動平均](https://zh.wikipedia.org/wiki/%E7%A7%BB%E5%8B%95%E5%B9%B3%E5%9D%87)可以用來觀察數據的歷史趨勢,為此我們使用Model模組中的SimpleMovingAverage。 # # $$ \bar P_{SM}= \frac{P_M+P_{M-1}+...+P_{M-(n-1)}} {n} $$ # # # + id="ddI-1PK8l1kd" sma_model = Model.SimpleMovingAverage(windows=10) # windows為移動窗格,一次累積10筆資料計算平均 sma_model.fit(data) # + 
[markdown] id="BC2cttffm5Vr" # ## 簡單移動平均圖 # 簡單移動平均的計算方式會使數據歷史波動狀況趨於緩和,如下圖'SMA(7)'的綠線較歷史趨勢灰線平穩。綠線的每個資料點皆是今日加前6天平均的結果,因此可以將美國的每日確診人數轉成較為長期的趨勢,用以觀察疫情控制情況。 # # Model物件在計算完模型後,可使用屬性的方式取得內部資料,如下圖的sma_model.sma_result是經過簡單移動平均計算後的數據。 # # chart.line_chart()能放入兩個不同長度的一維資料,可自訂圖的標題,存成圖檔時也會直接存為檔名。 # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="__n9xZaum6i1" outputId="2f0d1cfd-2fb6-4c16-bec9-5dd37a484f84" chart.line_chart(data, sma_model.sma_result, chart_title='COVID19 US :SMA(10)',file_path=save_path, xlabel='date', ylabel='population') # + [markdown] id="-1sAQ1ibz7pE" # #. # ## 模型預測 # 透過以上的統計圖分析後了解美國的COVID19每日新增確診人數的歷史狀態,若要預測美國未來一個月的新增確診人數(好準備醫療資源等用途),可以使用Model中的自迴歸模型進行預測。 # # . # ## 切分資料集成訓練集與測試集 # 模型使用訓練集進行訓練,測試集來測試模型是否能對未來的未知情況進行預測。 # + id="61acrFeX24nO" train, test = Data.split_data(data, ratio=0.8) # 80%的資料當訓練集訓練模型,剩下的20%為測試集檢視模型能力 # + [markdown] id="zdHCOHz-3Z7l" # #. # ## AutoRegressive自我迴歸模型 # 最常見的時間序列模型之一,其假設變數會受到變數前期影響,因此將時間序列數據自己的歷史數據當作解釋變數。簡單的只使用前一期資料做解釋變數,就稱作[一階自我迴歸模型AR(1)](http://homepage.ntu.edu.tw/~sschen/Book/Slides/Ch3AR.pdf)。 # # $$ AR(1):y_t=\beta_0+\beta_1 y_{t-1}+ \varepsilon_t $$ # # $$ AR(p):y_t=\beta_0+\beta_1 y_{t-1}+ \beta_2 y_{t-2}+ ...+\beta_p y_{t-p}+ \varepsilon_t $$ # # + id="b2tx4dBn9IbN" model_1 = Model.AutoRegressive(lags=2) # 建立AR(2)模型 model_1.fit(train) # 訓練模型 model_1.predict(test, pure_test_set_predict=True) # 進行預測 # + [markdown] id="S_YwBaTH-CEk" # ###只需三行便可完成AR(2)模型的建立、訓練、預測! # # 設計pure_test_set_predict這個參數,是因為AR模型使用前期資料進行當期預測,模型的預測資料數必定會少於丟入的測試集資料數,因此pure_test_set_predict=True,表示只使用測試集進行預測;若為False,會使用測試集的資料當作預測的過去資料,而不會損耗測試集的資料。 # # + colab={"base_uri": "https://localhost:8080/"} id="AC_TRO6IIemX" outputId="c2b3e451-f436-4b43-f62c-c6e08cb99409" print(len(model_1.test_data),', ',len(model_1.test_predict)) # + colab={"base_uri": "https://localhost:8080/"} id="6xPLXnjjIsr9" outputId="952d0244-4a3c-4aff-ef20-83a07708f11f" model_1.predict(test, pure_test_set_predict=False) print(len(model_1.test_data),', ',len(model_1.test_predict)) # + [markdown] id="Ktu-Xxcmjpjl" # 丟入model_1模型訓練與預測後,會產生各種參數可以取出數值,上面的model_1.test_data就是在模型預測過程中產生的測試資料,其他還有很多,比如model_1.test_error(使用測試集的預測誤差)、 model_1.train_predict(使用訓練集的預測資料)等等 # + id="6jLmNfkDJrrL" model_2 = Model.AutoRegressive(lags=20) model_2.fit(train) model_2.predict(test,pure_test_set_predict= False) # 跟model_1設同樣的條件 # + [markdown] id="ZBjAyMhTJrVN" # # . # ## 比較兩種模型 # 訓練好的模型資訊(訓練集預測誤差、測試集預測誤差等等)都包含在Model物件裡,使用chart.forecast_result_group_chart直接看看 # + id="lBOYyC1rKAzc" colab={"base_uri": "https://localhost:8080/", "height": 505} outputId="fea54ce6-7e84-4330-8e28-083622fd6a8c" chart.forecast_result_group_chart(train, test, model_1, model_2, file_path=save_path, model_1_name='AR(2)', model_2_name='AR(20)', xlabel='date', ylabel='population') # + [markdown] id="1oWygo4hJcNO" # 視覺上可以很明顯的看到AR(2)預測結果相當直線條,非常的線性迴歸,AR(20)預測則較能捕捉到序列資料的浮動,直觀來說參考過去的資料多,代表訓練模型較能學習到時間序列資料的隱藏規則。 # # 圖內的MSE、MAE、RMSE、NRMSE都是Model內儲存數值,因此比較結果,都是AR(20)模型預測效果較好 # # . # # + colab={"base_uri": "https://localhost:8080/"} id="8j7AigmIeDcu" outputId="b2d19630-ba7b-4e35-920d-06996a11c64f" print(model_1.test_error) # 按順序是MSE、MAE、RMSE、NRMSE # + colab={"base_uri": "https://localhost:8080/"} id="HuWNLhlIeepA" outputId="e75dde98-6e86-4826-e82b-9b0bea5da381" print(model_2.test_error) # 按順序是MSE、MAE、RMSE、NRMSE # + [markdown] id="SkW1nml3NBdq" # ## 一連串的迴歸模型評測指標 # #. 
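
# The individual metrics are walked through below (MSE, MAE, RMSE, NRMSE). As a quick
# cross-check, here is a small NumPy sketch of all four formulas. It is only illustrative
# and is not part of TimesML's Math.Statistics module; `y_true` and `y_pred` stand in for
# model_1.test_data and model_1.test_predict.

# +
import numpy as np

def regression_metrics(y_true, y_pred):
    """Return (MSE, MAE, RMSE, NRMSE) for two equal-length 1-D sequences."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    err = y_true - y_pred
    mse = np.mean(err ** 2)                       # mean squared error
    mae = np.mean(np.abs(err))                    # mean absolute error
    rmse = np.sqrt(mse)                           # root mean squared error
    nrmse = rmse / (y_true.max() - y_true.min())  # RMSE normalised by the observed range
    return mse, mae, rmse, nrmse

# Toy numbers (hypothetical values, just to show the call):
print(regression_metrics([10.0, 12.0, 15.0], [11.0, 11.0, 14.0]))
# -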
# ## MSE 均方誤差 # # 真實值減去預測值後再平方 # $$\frac{1}{m} \sum_{i=0}^{m} (y_i- \hat y_{i})^{2} $$ # # # + id="xzcZbk1pJ_2b" colab={"base_uri": "https://localhost:8080/"} outputId="c8d31588-d78c-48a3-8d88-2abe3d51bd08" mse = math.mean_square_error(model_1.test_data, model_1.test_predict) mse # + [markdown] id="B_Ya_ty7Snlz" # # ## MAE 平均絕對誤差 # # 真實值減去預測值後再取絕對值 # $$\frac{1}{m} \sum_{i=0}^{m}| (y_i- \hat y_{i})| $$ # # + id="ZpTWLrqvSqqh" colab={"base_uri": "https://localhost:8080/"} outputId="3d639aff-cad7-44a9-b7fb-5721ad8e0e2e" mae = math.mean_absolute_error(model_1.test_data, model_1.test_predict) mae # + [markdown] id="CWkmKVlYSiKi" # # ## RMSE 均方根誤差 # # MSE開根號 # $$ \sqrt{\frac{1}{m} \sum_{i=0}^{m} (y_i- \hat y_{i})^{2}} $$ # # + id="p9I4jE6OSmMd" colab={"base_uri": "https://localhost:8080/"} outputId="1ad929c1-aea0-4fb7-cbb9-e49cb21791fa" rmse = math.root_mean_squard_error(model_1.test_data, model_1.test_predict) rmse # + [markdown] id="_e3tI55LSrHJ" # # ## NRMSE 正規化方均根差 # [對RMSE進行正規化有助於在不同規模的數據集或模型之間進行比較](https://www.marinedatascience.co/blog/2019/01/07/normalizing-the-rmse/) # # # # $$NRMSE = \frac{RMSE}{y_{max}-y_{min}}$$ # $$$$ # $$ = \frac{ \sqrt{\frac{1}{m} \sum_{i=0}^{m} (y_i- \hat y_{i})^{2}} }{y_{max} - y_{min}} $$ # ####. # ## 實際遇到的問題 # A: # 有可能發生A模型的RMSE等等數值皆小於B模型,但是B模型在NRMSE卻小於A模型嗎? # # Q: # 有的,這是因為NRMSE考量到了模型使用的不同數據可能天差地遠,假如A模型須預測人口,B模型僅須預測股價,A模型計算的數值動輒百萬人次,若比RMSE一定很大,B模型所計算的股價波動也無法到達百萬,若比RMSE會很輕鬆就贏。但NRMSE正規化了RMSE後,A模型的預測能力可能會比B模型還要好,也就是NRMSE較小,A模型錯估上千人次在百萬人次裡顯的微不足道,B模型的小誤差1元反而會因為股價波動在10元左右而被放大。 # # # + id="3kszy7PsSsK1" colab={"base_uri": "https://localhost:8080/"} outputId="45dbffa5-aff9-441d-c51a-d98e04835cbb" nrmse = math.normalized_mean_squard_error(model_1.test_data, model_1.test_predict) nrmse # + [markdown] id="MeNa0bP88qkw" # ## 如何進一步判斷迴歸模型的解釋能力? # #####. # ## coefficient of determination 決定係數 # 衡量應變數的變異中可由自變數解釋部分所占的比例,可用來判斷統計模型解釋力(又稱判定係數)。 # # [對於簡單線性回歸而言,決定係數為樣本相關係數的平方。[4]當加入其他回歸自變量後,決定係數相應地變為多重相關係數的平方。](https://zh.wikipedia.org/wiki/%E5%86%B3%E5%AE%9A%E7%B3%BB%E6%95%B0) # #####. # [參考教材:高科大應用平台:判定係數](http://eschool.kuas.edu.tw/tsungo/Publish/14%20Simple%20linear%20regression%20analysis.pdf) # # * 簡單線性迴歸的決定係數 # $$ R^2 = \left( \frac{ n(\sum_{i=0}^{n} x_{i}y_{i})-(\sum_{i=0}^{n} x_{i})(\sum_{i=0}^{n} y_{i})}{\sqrt {[n(\sum_{i=0}^{n} x_{i}^{2})-(\sum_{i=0}^{n} x_{i})^{2}][n(\sum_{i=0}^{n} y_{i}^{2})-(\sum_{i=0}^{n} y_{i})^{2}] }} \right)^2$$ # #####. # # * 線性迴歸的決定係數 # $$ r^{2}=1-\frac {SSE}{SST}$$ # #####. # # # # * 平均觀察值 # $$ \bar y = \frac{1}{n} \sum_{i-0}^n y_i$$ # # * $SS_{tot}(SST)$ 總變異 # ** 實際觀察值與平均觀察值之間的差異平方和 # $$ \sum_{i=0}^{n}(y_i-\bar y)^2 $$ # #####. # # * $SS_{reg}(SSR)$ 迴歸項平方和 # ** 可解釋的變異 # ** 迴歸預測值與平均觀察值之間的差異平方和 # $$ \sum_{i=0}^{n}(\hat y_i-\bar y)^2 $$ # # #####. # * $SS_{res}(SSE)$ 殘差平方和 # ** 不可解釋的變異或隨機變異 # ** 實際觀察值與迴歸預測值之間的差異平方和 # $$ \sum_{i=0}^{n}(y_i-\hat y_i)^2 $$ # ** 有感覺的人應該可以發現,這公式就是常算的MSE # #####. # # #####. # * 總變異 = 可解釋變異+不可解釋變異 # # $$ SST = SSR + SSE$$ # #####. 
# # # * 線性迴歸的決定係數 # $$R^2 = \frac {SSR}{SST} = \frac {SST-SSE}{SST}=1-\frac {SSE}{SST}$$ # + [markdown] id="S2CEhtR8aop2" # ![image](https://upload.wikimedia.org/wikipedia/commons/thumb/8/86/Coefficient_of_Determination.svg/600px-Coefficient_of_Determination.svg.png) # # 決定係數$ r^{2}=1-{\frac {\color {blue}{SSE}}{\color {red}{SST}}}$示意圖 線性回歸(右側)的效果比起平均值(左側)越好,決定係數的值就越接近於1。 藍色正方形表示不可解釋變異, 紅色正方形數據表示總變異。 # + id="H4lT3U-9S0Ho" colab={"base_uri": "https://localhost:8080/"} outputId="c9bab01b-1a72-47f2-d0ee-53b3abe70b23" r_2 = math.coefficient_of_determination(train,model_1.test_data, model_1.test_predict) r_2 # + [markdown] id="o208ZnvUTNSP" # ## 以上為美國COVID19每日新增確診人數資料的TimesML簡單分析版本 # #####. # + [markdown] id="iKbkHygQbZCZ" # ## ProcessData.get_data_yahoo # 可以用爬蟲從yahoo finance取得想要的股票、貨幣歷史資料,並且自動儲存成檔案。 # 用法是輸入股票ID、開始日期、結束日期、存檔格式(支援txt,csv)、資料頻率(日資料、周資料、月資料、年資料),成功爬取時會出現Crawl successfully。 # 自動存檔功能是為了不要超過yahoo finance的爬取上限。 # # --- # # # * Use requests crawl yahoo finance stock price # * FinLab. 超簡單台股每日爬蟲教學. Retrieved July 22, 2020, from https://www.finlab.tw/超簡單台股每日爬蟲教學 # + colab={"base_uri": "https://localhost:8080/"} id="NQySZOOPWrga" outputId="cd5f035a-b783-40cd-cd89-c1edb3d308af" Data.get_data_yahoo(stock_id='AAPL', start_period='2019 12 3', end_period=' 2020 1,18', file_format='csv', frequency='day') # + id="DrYbs2RGX6qA" data = Data.read_file(path='AAPL/AAPL_day.csv', col_name='close') # + [markdown] id="HywB-nS-c6DI" # ## chart.historocal_trend_line_chart # APPLE公司的股價走勢 # # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="vpAas4Y_YLnv" outputId="767a24fb-5f95-4947-b941-c4f7d74a3721" chart.historocal_trend_line_chart(data, file_path='AAPL', xlabel='date', ylabel='price') # + [markdown] id="gRe3BzlfSyrg" # ## TimesML基礎用法介紹完畢 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 图论 Grahp Theory # # ## 节点Vertex # ## 变Edge # # ## 有向图 # ## 无向图 # # ## 有权图 # ## 无权图 # # ## 邻接矩阵 # ## 邻接表 # # 使用邻接矩阵来表示稠密图 Dense class DenseGraph: def __init__(self, n, directed=False): """ 构造稠密图 n: 节点数 directed: 是否有向 """ # 存储节点数 self.__n=n # 存储边数, 初始化为 0 self.__m=0 # 存储是否有向 self.__directed=directed # 邻接矩阵, (n, n),表示节点的是否连接,初始化为False self.__g= [ [False for i in range(n)] for i in range(n)] # 用来判断 两个节点是否相连 # 能够相连的节点,其对应的id属性值一样 self.__id=[-1 for i in range(self.__n)] def V(self): """ 图中有多少个节点 """ return self.__n def E(self): """ 图中有多少个边 """ return self.__m def hasEdge(self, v, w): """ 判断节点是否存在边 v:节点索引 w:节点索引 返回bool """ assert v>=0 and v=0 and w=0 and v<=self.__n and w>=0 and w=0 and v=0 and v=0 and w, 2021, # Cambridge University Press. # # Plot of the real and imaginary components of the index of refraction of silicon at T=300 K. # Data are from [Green 2008, Solar Energy Materials and Solar Cells, 92, 1305](https://www.sciencedirect.com/science/article/pii/S0927024808002158), Table 1 (doi: 10.1016/j.solmat.2008.06.009) # # + # %matplotlib inline import os import sys import math import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator, LogLocator, NullFormatter import warnings warnings.filterwarnings('ignore',category=UserWarning, append=True) # - # ## Standard Plot Format # # Setup the standard plotting format and make the plot. Fonts and resolution adopted follow CUP style. 
# # + figName = 'Fig6_4' # graphic aspect ratio = width/height aspect = 4.0/3.0 # 4:3 # Text width in inches - don't change, this is defined by the print layout textWidth = 6.0 # inches # output format and resolution figFmt = 'png' dpi = 600 # Graphic dimensions plotWidth = dpi*textWidth plotHeight = plotWidth/aspect axisFontSize = 10 labelFontSize = 8 lwidth = 0.5 axisPad = 5 wInches = textWidth hInches = wInches/aspect # Plot filename plotFile = f'{figName}.{figFmt}' # LaTeX is used throughout for markup of symbols, Times-Roman serif font plt.rc('text', usetex=True) plt.rc('font', **{'family':'serif','serif':['Times-Roman'],'weight':'bold','size':'16'}) # Font and line weight defaults for axes matplotlib.rc('axes',linewidth=lwidth) matplotlib.rcParams.update({'font.size':axisFontSize}) # axis and label padding plt.rcParams['xtick.major.pad'] = f'{axisPad}' plt.rcParams['ytick.major.pad'] = f'{axisPad}' plt.rcParams['axes.labelpad'] = f'{axisPad}' # - # ## Silicon index of refraction # # Data from Green 1980... # # Data are multicolumn ASCII text with # comments and a data header. We use # * lamr - wavelength in $\mu$m for n$_r$ # * nr - real part of the index of refraction (n$_r$) # * lami - wavelength in $\mu$m for n$_i$ # * ni - imaginary part of the index of refraction (n$_i$) # # + dataFile = 'Green2008_Si300K.txt' # data file has a text header, colhead1 colhead2 ... colheadN data = pd.read_csv(dataFile,sep=r'\s+',comment='#') lamr = np.array(data['lamr']) nr = np.array(data['nr']) lami = np.array(data['lami']) ni = np.array(data['ni']) # - # ## Plot the refraction index # # Real part as a solid line, imaginary part as a dotted line, labeled. # + xMin = 0.2 # microns xMax = 1.45 yMin = -0.01 yMax = 7.5 fig,ax = plt.subplots() fig.set_dpi(dpi) fig.set_size_inches(wInches,hInches,forward=True) ax.tick_params('both',length=6,width=lwidth,which='major',direction='in',top='on',right='on') ax.tick_params('both',length=3,width=lwidth,which='minor',direction='in',top='on',right='on') # Limits plt.xlim(xMin,xMax) ax.xaxis.set_major_locator(MultipleLocator(0.2)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) plt.xlabel(r'$\lambda$ [$\mu$m]',fontsize=axisFontSize) plt.ylim(yMin,yMax) ax.yaxis.set_major_locator(MultipleLocator(1)) ax.yaxis.set_minor_locator(MultipleLocator(0.5)) plt.ylabel(r'Index of refraction',fontsize=axisFontSize) # Real part plt.plot(lamr,nr,'-',color='black',lw=1.2,zorder=10) plt.text(0.41,6.0,r'Real ($n_{\rm r}$)',fontsize=labelFontSize,ha='left',color='black') # imaginary part plt.plot(lami,ni,':',color='black',lw=1.2,zorder=10) plt.text(0.38,2.5,r'Imaginary ($n_{\rm i}$)',fontsize=labelFontSize,ha='left',color='black') plt.plot() plt.savefig(plotFile,bbox_inches='tight',facecolor='white') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Exercise 6 - Fuzzy Set Operations # + [markdown] tags=[] # #### Fuzzy Set Operations: # # | S. no. | Operation | Definition using membership function | # | ------ | -------------------- | --------------------------------------------------------- | # | 1. | Complement | $$\mu_{\bar A}(x)= 1-\mu_A(x)$$ | # | 2. | Union | $$\mu_{A\cup B}(x) = \max(\mu_A(x),\mu_B(x)))$$ | # | 3. | Intersection | $$\mu_{A\cap B}(x) = \min(\mu_A(x),\mu_B(x))$$ | # | 4. | Algebraic Product | $$\mu_{A \cdot B}(x) = \mu_A(x)\cdot \mu_B(x)$$ | # | 5. 
| Algebraic Sum | $$\mu_{A+B}(x) = \mu_A(x)+\mu_B(x) - \mu_{A \cdot B}(x)$$ | # | 6. | Algebraic Difference | $$\mu_{A-B}(x) = \mu_{A\cap \bar B}(x)$$ | # | 7. | Bounded Product | $$\mu_{A\odot B}(x) =\max(\mu_A(x)+\mu_B(x)-1,0)$$ | # | 8. | Bounded Sum | $$\mu_{A\oplus B}(x) =\min(\mu_A(x)+\mu_B(x),1)$$ | # | 9. | Bounded Difference | $$\mu_{A\ominus B}(x) = \max(\mu_A(x)-\mu_B(x),0)$$ | # - # ### Implementation using Matlab or Octave # + [markdown] tags=[] # #### Aim: # To write a MATLAB or Octave program to find algebraic sum, algebraic subtraction, algebraic product, bounded sum, bounded subtraction and bounded product of two fuzzy sets. # + [markdown] tags=[] # #### Program: # - # ```octave # % Defining operations as functions # complement = @(A) 1 - A # union = @(A,B) max(A,B) # intersection = @(A,B) min(A,B) # algebraic_prod = @(A,B) A.*B # algebraic_sum = @(A,B) A+B - algebraic_prod(A,B) # algebraic_diff = @(A,B) A + complement(B) # bounded_prod = @(A,B) union(A+B-1,0) # bounded_sum = @(A,B) intersection(A+B,1) # bounded_diff = @(A,B) union(A-B,0) # # % Declaring two fuzzy sets for testing # A = [1 .5] # B = [.4 .2] # # % Displaying the results # disp('The algebraic sum') # disp(algebraic_sum(A,B)) # disp('The algebraic difference') # disp(algebraic_diff(A,B)) # disp('The algebraic product') # disp(algebraic_prod(A,B)) # disp('The bounded sum') # disp(bounded_sum(A,B)) # disp('The bounded difference') # disp (bounded_diff(A,B)) # disp('The bounded product') # disp(bounded_prod(A,B)) # ``` # #### Output: # ``` # The algebraic sum # 1.0000 0.6000 # The algebraic difference # 1.6000 1.3000 # The algebraic product # 0.4000 0.1000 # The bounded sum # 1.0000 0.7000 # The bounded difference # 0.6000 0.3000 # The bounded product # 0.4000 0 # ``` # ### Implementation using Python # + [markdown] tags=[] # #### Aim: # To write a python program to find algebraic sum, algebraic subtraction, algebraic product, bounded sum, bounded subtraction and bounded product of two fuzzy sets. 
# - # #### Program import numpy as np # + tags=[] def complement(A): return 1-A def union(A,B): return np.max([A,B],axis=0) def intersection(A,B): return np.min([A,B],axis=0) def algebraic_prod(A,B): return A*B def algebraic_sum(A,B): return A+B-algebraic_prod(A,B) def algebraic_diff(A,B): return A+complement(B) def bounded_prod(A,B): return union(A+B-1,np.zeros_like(A)) def bounded_sum(A,B): return intersection(A+B,np.ones_like(A)) def bounded_diff(A,B): return union(A-B,np.zeros_like(A)) # + A=np.array([1, .5]) B=np.array([.4, .2]) print('The algebraic sum',algebraic_sum(A,B)) print('The algebraic difference',algebraic_diff(A,B)) print('The algebraic product',algebraic_prod(A,B)) print('The bounded sum',bounded_sum(A,B)) print('The bounded difference',bounded_diff(A,B)) print('The bounded product',bounded_prod(A,B)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analytical functions # ## eval() function x=3 eval('x**2+2*x-2') # ## len() function numbers=[4,76,2,34,12,9] len(numbers) # ## factorial() function import math math.factorial(5) math.factorial(9) # ## sort() function runs=[45,10,150,76,28,90,200] runs.sort(reverse=True) runs # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 ana-1.2.9-py3 # language: python # name: ana-1.2.9-py3 # --- import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from lmfit import Model #matplotlib.rcParams.keys() plt.style.use('dark_background') mpl.rcParams['figure.figsize'] = (11, 7) mpl.rc('font', size=14) length = 8 width = 1.5 mpl.rcParams['xtick.major.size'] = length mpl.rcParams['ytick.major.size'] = length mpl.rcParams['xtick.major.width'] = width mpl.rcParams['ytick.major.width'] = width """ CONSTANTS """ e_0 = 8.85E-12 # [s/(Ohm*m)] c = 299792458 # [m/s] h = 4.135667516e-15 # eV*s # # XPCS spot size # + def speckle_size(lam, footprint, L): return lam*L/footprint display = 'plot' E = 1200 # eV lam = h*c/E*1e6 # lam = 1E-3 # [um] iangle = np.deg2rad(90) spot_size = np.arange(45,260,10) # [um] footprint = spot_size/np.sin(iangle) L = 3.5E6 # [um] S = speckle_size(lam, footprint, L) if display=='print': if S.ndim!=1: S = np.asarray([S]) st = 'Speckle size: ' + ', '.join('{:0.2f}'.format(k) for k in S) + ' [um]' print(st) elif display=='plot': x = np.argmax([np.array([lam]).size, np.array([footprint]).size, np.array([L]).size]) if x==0: x = lam*1000 xlabel = 'Wavelength [nm]' elif x==1: x = footprint xlabel = 'Footprint size [um]' elif x==2: x = L*10**-6 xlabel = 'Samplt - det distance [m]' fig, ax = plt.subplots() ax.plot(x,S,'-o') ax.set_xlabel(xlabel) ax.set_ylabel('Speckel size [um]') ax.grid() plt.show() # - # # Optical properties # + """ MATERIAL AND LIGHT DATA """ sigma = 500 # [1/(Ohm*cm)] sigma = sigma*100 # [1/(Ohm*m)] e_r = 0.5 # [] real part of epsilon wvl = 800E-9 # [m] wavelength f = c/wvl # [1/s] omega = 2*np.pi*f # [1/s] """#########################################################################""" n = np.sqrt(e_r+1j*sigma/e_0/omega) alpha = 2*n.imag*omega/c str = '\nPenetration depth 1/alpha = {:0.2f} [nm]'.format((1/alpha)*10**9) print(str) """ REFLECTIVITY """ nr = n.real; ni = n.imag; thetai = np.deg2rad(np.arange(0,91,1)) thetat = np.arcsin( np.sin(thetai)/n ) r = (np.cos(thetai)-n*np.cos(thetat)) / 
(np.cos(thetai)+n*np.cos(thetat)) R = np.absolute( r )**2 # reflectivity at incident angle theta1 (s-pol) R_N = ( 1-n / (1+n) )**2 # normal incidence reflectivity """ p-polarized light """ num = -( (nr**2-ni**2+1j*(2*nr*ni)) )*np.cos(thetai) + np.sqrt( (nr**2-ni**2-np.sin(thetai)**2+1j*2*nr*ni) ) den = ( (nr**2-ni**2+1j*(2*nr*ni)) )*np.cos(thetai) + np.sqrt( (nr**2-ni**2-np.sin(thetai)**2+1j*2*nr*ni) ) R_p = np.absolute(num / den)**2 """ s-polarized light """ num = np.cos(thetai) - np.sqrt( (nr**2-ni**2-np.sin(thetai)**2+1j*2*nr*ni) ) den = np.cos(thetai) + np.sqrt( (nr**2-ni**2-np.sin(thetai)**2+1j*2*nr*ni) ) R_s = np.absolute(num / den)**2 thetai = np.rad2deg(thetai) thetat = np.rad2deg(thetat.real) plt.figure('Reflectivity') plt.title('Reflectivity') plt.plot(thetai, R_p, label='p-polarized') plt.xlabel('incident angle') plt.ylabel('reflectance') plt.plot(thetai, R_s, label='s-polarized') plt.xlabel('incident angle') plt.ylabel('reflectance') plt.legend(loc='upper left') plt.grid() plt.figure('Refraction') plt.title('Refraction') plt.plot(thetai, thetat) plt.xlabel('theta_i') plt.ylabel('theta_t') plt.grid() plt.show() # - # # Fluence # + """ material properties """ """ low T """ z0 = 52 # effective penetration depth [nm] T = 0.8 # transmission Cp = 5 # heat capacity [J/K(/mol)] """ high T """ z0 = 64 T = 0.82 Cp = 10 """ laser properties """ power = 5 # [mW] rep_rate = 1000 # [Hz] spotx = 0.046 # FWHM [cm] spoty = 0.052 # FWHM [cm] err_spot = 0.001 # [cm] """ fluence """ fluence = power/rep_rate/spotx/spoty # [mJ/cm^2] fluence_max = power/rep_rate/(spotx-err_spot)/(spoty-err_spot) fluence_min = power/rep_rate/(spotx+err_spot)/(spoty+err_spot) print('\nfluence = {:0.2f} ({:1.2f}, {:2.2f}) mJ/cm^2\n'.format(fluence, fluence_min-fluence, fluence_max-fluence)) """ energy density """ dlayer = 1 # [nm] flu_top = fluence*T*np.exp(-0*dlayer/z0) flu_bottom = fluence*T*np.exp(-1*dlayer/z0) n0 = (flu_top-flu_bottom)/(dlayer*1E-7)/1000 flu_top = fluence_max*T*np.exp(-0*dlayer/z0) flu_bottom = fluence_max*T*np.exp(-1*dlayer/z0) n0_max = (flu_top-flu_bottom)/(dlayer*1E-7)/1000 flu_top = fluence_min*T*np.exp(-0*dlayer/z0) flu_bottom = fluence_min*T*np.exp(-1*dlayer/z0) n0_min = (flu_top-flu_bottom)/(dlayer*1E-7)/1000 print('n = {:0.2f} ({:1.2f}, {:2.2f}) mJ/cm^2\n'.format(n0, n0_min-n0, n0_max-n0)) """ Average heating """ heating = power/rep_rate/Cp*T *1000 #[mK] print('heating = {:0.3f} K'.format(heating)) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="tB3CJiNv0snT" executionInfo={"status": "ok", "timestamp": 1635302679728, "user_tz": 180, "elapsed": 944, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj6Xw9kJid9xbrLXlZ-X6GkHj5jEeCPZu5k8Cburfw=s64", "userId": "04298374874242312476"}} import numpy as np # + id="yUZ__LtcvFO6" executionInfo={"status": "ok", "timestamp": 1635304529911, "user_tz": 180, "elapsed": 293, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj6Xw9kJid9xbrLXlZ-X6GkHj5jEeCPZu5k8Cburfw=s64", "userId": "04298374874242312476"}} def trapezoid(f, a, b, n): x, h = np.linspace(a, b, n+1, retstep=True) for xi, fxi in zip(x, f(x)): print(f'{xi}\t{fxi}') total = 0. for i, xi in enumerate(x): total += (1 if i == 0 or i == x.shape[0] - 1 else 2) * f(xi) return (h/2) * total def simpson(f, a, b, n): x, h = np.linspace(a, b, n+1, retstep=True) total = 0. 
for i, xi in enumerate(x): coefficient = 0. if i == 0 or i == x.shape[0] - 1: coefficient = 1 elif i % 2 == 0: coefficient = 2 else: coefficient = 4 total += coefficient * f(xi) return (h/3) * total # + colab={"base_uri": "https://localhost:8080/"} id="G9HUjWeRxByz" executionInfo={"status": "ok", "timestamp": 1635302728843, "user_tz": 180, "elapsed": 302, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj6Xw9kJid9xbrLXlZ-X6GkHj5jEeCPZu5k8Cburfw=s64", "userId": "04298374874242312476"}} outputId="d8cfb22b-dc75-453b-eff9-e626c2beda20" p = lambda x: 5.7384 - 7.7245*x - 9.6988 * x**2 - 6.5542 * x**3 + 8.7136 * x**4 - 9.044 * x**5 simpson(p, -1.1228, 1.859, 4) # + colab={"base_uri": "https://localhost:8080/"} id="j0B-BbpluMEF" executionInfo={"status": "ok", "timestamp": 1635304145248, "user_tz": 180, "elapsed": 300, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj6Xw9kJid9xbrLXlZ-X6GkHj5jEeCPZu5k8Cburfw=s64", "userId": "04298374874242312476"}} outputId="dccf56f8-e7d7-43dd-d894-22cb88fa5bc7" f = lambda x: np.exp(x) trapezoid(f, 0, 9, 12) # + colab={"base_uri": "https://localhost:8080/"} id="NasWGePXuuPj" executionInfo={"status": "ok", "timestamp": 1635304531626, "user_tz": 180, "elapsed": 15, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj6Xw9kJid9xbrLXlZ-X6GkHj5jEeCPZu5k8Cburfw=s64", "userId": "04298374874242312476"}} outputId="b3f0ed9f-78fa-49ed-aee0-d1b4fd94b245" f = lambda x: x*x*np.sqrt(x*x + 1) trapezoid(f, 1, 2, 7) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [source](../../api/alibi_detect.cd.context_aware.rst) # # Context-Aware Maximum Mean Discrepancy # # ## Overview # # The context-aware maximum mean discrepancy drift detector ([Cobb and , 2022](https://arxiv.org/abs/2203.08644)) is a kernel based method for detecting drift in a manner that can take relevant context into account. A normal drift detector detects when the distributions underlying two sets of samples $\{x^0_i\}_{i=1}^{n_0}$ and $\{x^1_i\}_{i=1}^{n_1}$ differ. A context-aware drift detector only detects differences that can **not** be attributed to a corresponding difference between sets of associated context variables $\{c^0_i\}_{i=1}^{n_0}$ and $\{c^1_i\}_{i=1}^{n_1}$. # # Context-aware drift detectors afford practitioners the flexibility to specify their desired context variable. It could be a transformation of the data, such as a subset of features, or an unrelated indexing quantity, such as the time or weather. Everything that the practitioner **wishes to allow to change** between the reference window and test window should be captured within the context variable. # # On a technical level, the method operates in a manner similar to the [maximum mean discrepancy detector](./mmddrift.ipynb). However, instead of using an estimate of the squared difference between kernel mean embeddings of $X_{\text{ref}}$ and $X_{\text{test}}$ as the test statistic, we now use an estimate of the *expected* squared difference between the kernel [*conditional* mean embeddings](https://arxiv.org/abs/2002.03689) of $X_{\text{ref}}|C$ and $X_{\text{test}}|C$. As well as the kernel defined on the space of data $X$ required to define the test statistic, estimating the statistic additionally requires a kernel defined on the space of the context variable $C$. 
For any given realisation of the test statistic an associated p-value is then computed using a [conditional permutation test](https://www.jstor.org/stable/2288402). # # The detector is designed for cases where the training data contains a rich variety of contexts and individual test windows may cover a much more limited subset. **It is assumed that the test contexts remain within the support of those observed in the reference set**. # # ## Usage # # ### Initialize # # # Arguments: # # * `x_ref`: Data used as reference distribution. # * `c_ref`: Context for the reference distribution. # # # Keyword arguments: # # * `backend`: Both **TensorFlow** and **PyTorch** implementations of the context-aware MMD detector as well as various preprocessing steps are available. Specify the backend (*tensorflow* or *pytorch*). Defaults to *tensorflow*. # # * `p_val`: p-value used for significance of the permutation test. # # * `preprocess_x_ref`: Whether to already apply the (optional) preprocessing step to the reference data `x_ref` at initialization and store the preprocessed data. Dependent on the preprocessing step, this can reduce the computation time for the predict step significantly, especially when the reference dataset is large. Defaults to *True*. It is possible that it needs to be set to *False* if the preprocessing step requires statistics from both the reference and test data, such as the mean or standard deviation. # # * `update_ref`: Reference data can optionally be updated to the last N instances seen by the detector. The parameter should be passed as a dictionary *{'last': N}*. # # * `preprocess_fn`: Function to preprocess the data (`x_ref` and `x`) before computing the data drift metrics. Typically a dimensionality reduction technique. **NOTE**: Preprocessing is not applied to the context data. # # * `x_kernel`: Kernel defined on the data `x_*`. Defaults to a Gaussian RBF kernel (`from alibi_detect.utils.pytorch import GaussianRBF` or `from alibi_detect.utils.tensorflow import GaussianRBF` dependent on the backend used). # # * `c_kernel`: Kernel defined on the context `c_*`. Defaults to a Gaussian RBF kernel (`from alibi_detect.utils.pytorch import GaussianRBF` or `from alibi_detect.utils.tensorflow import GaussianRBF` dependent on the backend used). # # * `n_permutations`: Number of permutations used in the conditional permutation test. # # * `prop_c_held`: Proportion of contexts held out to condition on. # # * `n_folds`: Number of cross-validation folds used when tuning the regularisation parameters. # # * `batch_size`: If not `None`, then compute batches of MMDs at a time rather than all at once which could lead to memory issues. # # * `input_shape`: Optionally pass the shape of the input data. # # * `data_type`: can specify data type added to the metadata. E.g. *'tabular'* or *'image'*. # # * `verbose`: Whether or not to print progress during configuration. # # # Additional PyTorch keyword arguments: # # * `device`: *cuda* or *gpu* to use the GPU and *cpu* for the CPU. If the device is not specified, the detector will try to leverage the GPU if possible and otherwise fall back on CPU. 
# # # Initialized drift detector example with the PyTorch backend: # # # ```python # from alibi_detect.cd import ContextMMDDrift # # cd = ContextMMDDrift(x_ref, c_ref, p_val=.05, backend='pytorch') # ``` # # The same detector in TensorFlow: # # ```python # from alibi_detect.cd import ContextMMDDrift # # cd = ContextMMDDrift(x_ref, c_ref, p_val=.05, backend='tensorflow') # ``` # ### Detect Drift # # We detect data drift by simply calling `predict` on a batch of test or deployment instances `x` and contexts `c`. We can return the p-value and the threshold of the permutation test by setting `return_p_val` to *True* and the context-aware maximum mean discrepancy metric and threshold by setting `return_distance` to *True*. We can also set `return_coupling` to *True* which additionally returns the coupling matrices $W_\text{ref,test}$, $W_\text{ref,ref}$ and $W_\text{test,test}$. As illustrated in the examples ([text](../../examples/cd_context_20newsgroup.ipynb), [ECGs](../../examples/cd_context_ecg.ipynb)) this can provide deep insights into where the reference and test distributions are similar and where they differ. # # The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys: # # * `is_drift`: 1 if the sample tested has drifted from the reference data and 0 otherwise. # # * `p_val`: contains the p-value if `return_p_val` equals *True*. # # * `threshold`: p-value threshold if `return_p_val` equals *True*. # # * `distance`: conditional MMD^2 metric between the reference data and the new batch if `return_distance` equals *True*. # # * `distance_threshold`: conditional MMD^2 metric value from the permutation test which corresponds to the the p-value threshold. # # * `coupling_xx`: coupling matrix $W_\text{ref,ref}$ for the reference data. # # * `coupling_yy`: coupling matrix $W_\text{test,test}$ for the test data. # # * `coupling_xy`: coupling matrix $W_\text{ref,test}$ between the reference and test data. # # # ```python # preds = cd.predict(x, c, return_p_val=True, return_distance=True, return_coupling=True) # ``` # ### Saving and loading # # Coming soon! 
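# To close the usage section, a short sketch of reading the prediction dictionary described above (assuming `cd`, `x` and `c` from the previous snippets; the keys are the ones documented under Detect Drift, the print formatting is illustrative only):
#
# ```python
# preds = cd.predict(x, c, return_p_val=True, return_distance=True, return_coupling=True)
#
# drifted = preds['data']['is_drift'] == 1     # 1 if drift was detected, 0 otherwise
# p_val = preds['data']['p_val']               # p-value of the conditional permutation test
# dist = preds['data']['distance']             # conditional MMD^2 estimate
# w_ref_test = preds['data']['coupling_xy']    # coupling between reference and test instances
# print(f'drift: {drifted}, p-value: {p_val:.4f}, distance: {dist:.4f}')
# ```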
# ## Examples # # # ### Text # # [Context-aware drift detection on news articles](../../examples/cd_context_20newsgroup.ipynb) # # ### Time series # # [Context-aware drift detection on ECGs](../../examples/cd_context_ecg.ipynb) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- import torch N, D_in, H, D_out = 64,1000,100,10 # + x = torch.randn(N,D_in) y = torch.randn(N,D_out) w1 = torch.randn(D_in,H,requires_grad=True) w2 = torch.randn(H,D_out,requires_grad=True) # - learning_rate = 1e-6 for t in range(500): # mm is similar with matmul, but not support broadcast y_pred = x.mm(w1).clamp(min=0).mm(w2) loss = (y_pred - y).pow(2).sum() if t % 100 == 0: print(loss) loss.backward() with torch.no_grad(): w1 -= learning_rate*w1.grad w2 -= learning_rate*w2.grad w1.grad.zero_() w2.grad.zero_() # ## PyTorch nn # + model = torch.nn.Sequential( torch.nn.Linear(D_in,H), torch.nn.ReLU(), torch.nn.Linear(H,D_out)) learning_rate = 1e-4 optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate) for t in range(500): y_pred = model(x) loss = torch.nn.functional.mse_loss(y_pred,y) if t % 100 == 0: print(loss) loss.backward() optimizer.step() optimizer.zero_grad() # - # ## PyTorch DataLoaders import torch from torch.utils.data import DataLoader,TensorDataset import numpy as np # + N, D_in, H, D_out = 64,1000,100,10 # x = torch.randn(N,D_in) # y = torch.randn(N,D_out) x = np.random.randn(N,D_in) y = np.random.randn(N,D_out) # - x = torch.Tensor(x) y = torch.Tensor(y) loader = DataLoader(TensorDataset(x,y),batch_size=8) class TwoLayerNet(torch.nn.Module): def __init__(self,D_in,H,D_out): super(TwoLayerNet,self).__init__() self.linear1 = torch.nn.Linear(D_in,H) self.linear2 = torch.nn.Linear(H,D_out) def forward(self,x): h_relu = self.linear1(x).clamp(min=0) # clamp(min=0) is the same as ReLU function y_pred = self.linear2(h_relu) return y_pred model = TwoLayerNet(D_in,H,D_out) optimizer = torch.optim.SGD(model.parameters(),lr=1e-2) for epoch in range(20): epoch_loss = 0 for x_batch,y_batch in loader: y_pred = model(x_batch) loss = torch.nn.functional.mse_loss(y_pred,y_batch) epoch_loss += loss loss.backward() optimizer.step() optimizer.zero_grad() print('epoch {}, loss:{}'.format(epoch,epoch_loss)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.3 64-bit # language: python # name: python37364bit3ca3215d4e6240698248485fce128d4d # --- # + # External libraries import requests import nbimporter # Builtins import json import os import concurrent.futures import functools import zipfile import pathlib import urllib.request # - # ### Establish sprites url # + db_path = "./data/pokemon_details_pokeapi/" # Make directries (recursively) if not exist. 
pathlib.Path(db_path).mkdir(parents=True, exist_ok=True) sprite_urls = [] for file in os.listdir(db_path): with open(db_path + file) as f: data = json.load(f) if data["sprites"]["front_default"]: sprite_urls.append((data["id"], data["species"]["name"] , data["sprites"]["front_default"])) print(f"There are {len(sprite_urls)} sprites in total") # - # `sprite_urls` looks like this # ```python # [(10001, 'deoxys', 'https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/10001.png'), # (10002, 'deoxys', 'https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/10002.png'), # (10003, 'deoxys', 'https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/10003.png'), # ... # ] # ``` # ### Download Sprites using 10 threads def download_sprite(sprite_url, overwrite=False): id, name, url = sprite_url path = f"./data/pokemon_sprites_pokeapi/{id}_{name}.png" if overwrite or not os.path.exists(path): urllib.request.urlretrieve(url, path) # Multithreading no_threads = 10 with concurrent.futures.ThreadPoolExecutor(max_workers=no_threads) as executor: executor.map(download_sprite, sprite_urls) print(f"Finish downloading all sprites") # ### Make a zip file for archiving path = "./data/pokemon_sprites_pokeapi/" with zipfile.ZipFile(f'{path[:-1]}.zip','w') as zip_file: for file in os.listdir(f"{path}"): zip_file.write(f"{path}{file}", f"{file}", compress_type=zipfile.ZIP_DEFLATED) print(f"Zip to {path[:-1]}.zip successfully") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} from gs_quant.session import Environment, GsSession # external users should substitute their client id and secret; please skip this step if using internal jupyterhub GsSession.use(Environment.PROD, client_id=None, client_secret=None) # - # # Macro Models # # The GS Quant `MacroRiskModel` class gives users the power to upload their own risk models to Marquee for seamless integration with the Marquee Portfolio Analytics and Plot Tool Pro suite. After uploading a custom `MacroRiskModel`, users can access their Macro model data programmatically using GS Quant. # ## Create a Macro Model # # Input fields to create the initial Macro Risk Model object # # | Attribute |Can be Modified |Description # |-----------------|-------------------|------------- # | id | No |Model id| # | name | Yes |Name of model| # | description | Yes |Longer description of model| # | term | Yes |Term or horizon of model. One of: Long, Medium, Short| # | coverage | Yes |Geographical coverage of assets within model universe. One of: Global, Region, Region Excluding Countries, Country| # | vendor | Yes |Who creates the model| # | version | Yes |Version of model| # | identifier | No |Identifier used to upload the model's asset universe. 
One of: sedol, cusip, bcid, gsid| # | entitlements | Yes |Who can manage, edit, and view the risk model| # # + pycharm={"name": "#%%\n"} from gs_quant.models.risk_model import MacroRiskModel, RiskModelCalendar, Term, CoverageType, UniverseIdentifier model_id = 'MY_MODEL' model_name = 'My Risk Model' description = 'My Custom Macro Risk Model' term = Term.Medium coverage = CoverageType.Country universe_identifier = UniverseIdentifier.sedol vendor = '' # + pycharm={"name": "#%%\n"} # create model with inputs model = MacroRiskModel( id_=model_id, name=model_name, description=description, coverage=coverage, term=term, universe_identifier=universe_identifier, vendor=vendor, version=1, ) model.save() # - # ## Upload a Calendar To Your Model # The calendar associated with the Macro Risk Model contains the dates which the risk model should have posted data on to be considered "complete." The calendar can go further back as well as forward in time than the data that is currently posted for the calendar, but there cannot be any gaps in the data posted to the risk model according to the calendar. # + pycharm={"name": "#%%\n"} calendar = RiskModelCalendar([ '2021-01-29', '2021-01-28', '2021-01-27', '2021-01-26', '2021-01-25', '2021-01-22', '2021-01-21', '2021-01-20', '2021-01-19', '2021-01-18', '2021-01-15', '2021-01-14', '2021-01-13', '2021-01-12', '2021-01-11', '2021-01-08', '2021-01-07', '2021-01-06', '2021-01-05', '2021-01-04', '2021-01-01' ]) model.upload_calendar(calendar) # - # ## Upload Data To Your Model # # Once the calendar is posted for a model, we can start uploading data to it. We can supply data multiple ways: # # 1. Upload total data one day at a time # 2. Upload partial data one day at a time # # For a complete day of data, we need three things, defined in `RiskModelData` # 1. Factor Data # - factorId: Can be any string, but needs to map consistently to the same factor across every date # - factorName: Can be any string, will be the display name of the factor, should be consistent across every date # - factorCategoryId: Id of the category that the factor belongs to # - factorCategory: Name of the category that the factor belongs to, will be the display name of the category (Style, Industry, Market, Currency, ect.) # - factorReturn: Daily return of the factor in percent units # 2. 
Asset Data # - universe: Array of assets in the universe # - factorExposure: Array of dictionaries that map factorId to the factor exposure of that asset, corresponds to ordering of asset universe # - specificRisk: Array of annualized specific risk in percent units, corresponds to ordering of asset universe (null values not allowed) # - totalRisk: (optional) Array of total risk in percent units, corresponds to ordering of asset universe (null values not allowed) # - historicalBeta: (optional) Array of historical beta, corresponds to ordering of asset universe (null values not allowed) # # ### Upload Full Data # + pycharm={"name": "#%%\n"} data = { 'date': '2021-01-13', # Note: You can only upload to dates in your risk model's calendar 'assetData': { 'universe': ['B02V2Q0', '6560713', 'B3Q15X5', '0709954'], 'specificRisk': [12.09, 45.12, 3.09, 1.0], 'factorExposure': [ {'1': 0.23, '2': 0.023}, {'1': 0.023, '2': 2.09, '3': 0.3}, {'1': 0.063, '2': 2.069, '3': 0.73}, {'2': 0.067, '3': 0.93} ], 'totalRisk': [12.7, 45.5, 12.7, 10.3] }, 'factorData': [ { 'factorId': '1', 'factorName': 'USD', 'factorCategory': 'Currency', 'factorCategoryId': 'CUR', 'factorReturn': 0.5 }, { 'factorId': '2', 'factorName': 'JPY 1Y Basis Swap', 'factorCategory': 'GDP', 'factorCategoryId': 'GDP', 'factorReturn': 0.3 }, { 'factorId': '3', 'factorName': 'US HY', 'factorCategory': 'Credit Spreads', 'factorCategoryId': 'CDS', 'factorReturn': 0.2 } ] } model.upload_data(data) # - # ## Query Data From Model # # Once the data is uploaded, you can query it back using the same class # + pycharm={"name": "#%%\n"} from gs_quant.models.risk_model import Measure, DataAssetsRequest import datetime as dt model = MacroRiskModel.get(model_id) # get multiple measures across a date range for a universe specified start_date = dt.date(2021, 1, 13) end_date = dt.date(2021, 1, 13) universe_for_request = DataAssetsRequest(universe_identifier.value, []) # an empty assets request returns the full universe data_measures = [Measure.Universe_Factor_Exposure, Measure.Asset_Universe, Measure.Specific_Risk, Measure.Total_Risk, Measure.Factor_Id, Measure.Factor_Name, Measure.Factor_Category, Measure.Factor_Category_Id, Measure.Factor_Return ] macro_factor_data = model.get_data(data_measures, start_date, end_date, universe_for_request, limit_factors=True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [phitar](https://jangcom.github.io/phitar/): Testing of the e-type = 2 source subsection # # by # # * Last: 2020-06-06 # * First: 2020-04-27 # ## Introduction # # Options for setting source energy distributions will be added to phitar v1.04. One of the options is a Gaussian energy distribution setter that uses the e-type = 2 subsection of PHITS. Before beginning the update, here we examine if the e-type = 2 subsection leads to equivalent results of the e0 command used until phitar v1.03. In addition, whether the e-type = 2 can be used as the e0 command, in which case the former can completely replace the latter, is also tested. 
import re import subprocess import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.ticker import AutoMinorLocator from IPython.display import set_matplotlib_formats from IPython.display import SVG # + # I/O out_path = './out' # Outbound phitar_fig_path = '../../../figs' # Inbound phits_path = '../phits' # pandas flue_header = ['Energy (MeV)', 'Monte Carlo fluence (cm$^{-2}$ source$^{-1}$)'] # MPL mpl.rcParams.update({'font.sans-serif': 'Arial', 'pdf.fonttype': 42}) plt.style.use('bmh') xlabpad = 15 ylabpad = 15 # IPython.display set_matplotlib_formats('svg') # - def disp_rows_of_int(lines, rows, rows_of_int): """Display the rows of interest of a PHITS input file.""" for i in range(len(lines)): for row in rows_of_int: if i in rows[row]['range']: if i == rows[row]['range'][0]: print(rows[row]['header']) print(lines[i]) def save_curr_fig(out_bname, out_path=out_path): """Save a figure in multiple formats.""" out_bname_full = '{}/{}'.format(out_path, out_bname) for fmt in ['pdf', 'svg']: plt.savefig('{}.{}'.format(out_bname_full, fmt), bbox_inches='tight') rtn = subprocess.run('inkscape {}.svg -M {}.emf'.format(out_bname_full, out_bname_full)) def plot_flue(ax, xdata, ydata, xmin=30, xmax=40, clr='r', mrk='o', ls='None', lab='e0', loc=0): """Plot particle fluence against energy.""" ax.set_xlim(xmin, xmax) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.yaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(axis='both', which='major', direction='in', length=5) ax.tick_params(axis='both', which='minor', direction='in', length=3) ax.set_xlabel(flue_header[0], labelpad=xlabpad) ax.set_ylabel(flue_header[1], labelpad=ylabpad) ax.plot(xdata, ydata, color=clr, marker=mrk, linestyle=ls, label=lab) ax.legend(loc=loc) # ## Comparison using T-Cross tallies # ### Geometry and tally settings # Part of a PHITS input file 'geom.inp' # + # I/O f = '{}/geom.inp'.format(phits_path) fh = open(f) lines = [v.rstrip() for v in fh] fh.close() rows = { 'params': { 'header': '', 'range': [22, 23], }, 't_cross': { 'header': '', 'range': range(175, 200), }, 't_gshow': { 'header': '', 'range': [203, 218, 223], }, } rows_of_int = [ 'params', 't_cross', 't_gshow', ] disp_rows_of_int(lines, rows, rows_of_int) # - # Run the PHITS input file with icntl = 7 and obtain a G-Show file. # !phits ../phits/geom.inp # Obtain a descriptive figure by decorating the G-Show file. SVG(filename='{}/etype2.svg'.format(phitar_fig_path)) # * Electron beams begin propagation at z = -50 mm # toward the +z direction with an incidence angle of 0 deg. # * The energy spectra of "intact" electron beams are measured # by using a T-Cross tally placed at an "ambient" virtual plane at z = -40 mm. # * The T-Cross tally output is set to be "flux", # but is equivalent to "current" because the incidence angle is 0 deg # (p. 210, PHITS v3.02 JP man). # ### Monoenergetic # Part of a PHITS input file 'e0.inp' # + # I/O f = '{}/e0.inp'.format(phits_path) fh = open(f) lines = [v.rstrip() for v in fh] fh.close() rows = { 'params': { 'header': '', 'range': [22, 23], }, 'source': { 'header': '', 'range': range(47, 58), }, } rows_of_int = [ 'params', 'source', ] disp_rows_of_int(lines, rows, rows_of_int) # - # Run the PHITS input with icntl = 0. # !phits ../phits/e0.inp # Check the T-Cross result. t_cross_bname_full = '{}/e0-cross-eng-intact'.format(phits_path) t_cross = '{}.ang'.format(t_cross_bname_full) t_cross_trimmed = '{}_trimmed.ang'.format(t_cross_bname_full) # Read in the T-Cross result file. 
t_cross_fh = open(t_cross) t_cross_lines = [v for v in t_cross_fh] t_cross_fh.close() # Retrieve only the data part and write them to a file. data_rows = range(251, 1252) t_cross_data = [t_cross_lines[v] for v in data_rows] t_cross_data[0] = re.sub('#(.*)', ' \\1', t_cross_data[0]) t_cross_trimmed_fh = open(t_cross_trimmed, 'w') for line in t_cross_data: t_cross_trimmed_fh.write(line) t_cross_trimmed_fh.close() df = pd.read_csv(t_cross_trimmed, sep=r'\s+') df plot_flue(plt.subplot(), df.loc[:, 'e-upper'], df.loc[:, 'electron']) save_curr_fig('e0') # The total electron fluence is tot_elec_flue = df.electron.sum() print('{:.3f} cm^-2 source-^1'.format(tot_elec_flue)) # The T-Cross area is t_cross_area = (t_cross_lines[1263].split())[2] t_cross_area = float(t_cross_area) print('{} cm^2'.format(t_cross_area)) # Multiplying the two fluence by the t-cross area gives tot_elec_flue * t_cross_area # which is almost equal to 1. This means that the fluence had been calculated as the number of particles passed through the T-Cross area divided by the T-Cross area: the_tot_elec_flue = 1 / t_cross_area print('{:.3f} cm^-2 source^-1'.format(the_tot_elec_flue)) # We have just confirmed how the T-Cross tally is calculated (p. 210, PHITS v3.02 Jp man). Now, change the electron energy distribution from monoenergetic to Gaussian-energetic and, see the effect. # ### Gaussian energy distribution # Part of a PHITS input file 'etype2.inp' # + # I/O f = '{}/etype2.inp'.format(phits_path) fh = open(f) lines = [v.rstrip() for v in fh] fh.close() rows = { 'params': { 'header': '', 'range': [22, 23], }, 'source': { 'header': '', 'range': range(47, 56), }, } rows_of_int = [ 'params', 'source', ] disp_rows_of_int(lines, rows, rows_of_int) # - # Run the PHITS input with icntl = 0. # !phits ../phits/etype2.inp # Check the T-Cross result. t_cross_bname_full = '{}/etype2-cross-eng-intact'.format(phits_path) t_cross = '{}.ang'.format(t_cross_bname_full) t_cross_trimmed = '{}_trimmed.ang'.format(t_cross_bname_full) # Read in the T-Cross result file. t_cross_fh = open(t_cross) t_cross_lines = [v for v in t_cross_fh] t_cross_fh.close() # Retrieve only the data part and write them to a file. data_rows = range(251, 1252) t_cross_data = [t_cross_lines[v] for v in data_rows] t_cross_data[0] = re.sub('#(.*)', ' \\1', t_cross_data[0]) t_cross_trimmed_fh = open(t_cross_trimmed, 'w') for line in t_cross_data: t_cross_trimmed_fh.write(line) t_cross_trimmed_fh.close() df2 = pd.read_csv(t_cross_trimmed, sep=r'\s+') df2 plot_flue(plt.subplot(), df2.loc[:, 'e-upper'], df2.loc[:, 'electron'], clr='b', mrk='None', ls='-', lab='e-type = 2') save_curr_fig('etype2') # The total electron fluence is tot_elec_flue = df2.electron.sum() print('{:.3f} cm^-2 source-^1'.format(tot_elec_flue)) # which is almost the same as the monoenergetic one (see [Monoenergetic](#Monoenergetic)). # ### Additional test: Use of e-type = 2 as e0 # The main commands of the e-type = 2 subsection include (p. 100, PHITS v3.02 JP man) # # * eg0: Center of Gaussian distribution (MeV) # * eg1: FWHM of Gaussian distribution (MeV) # * eg2: Minimum cutoff for Gaussian distribution (MeV) # * eg3: Maximum cutoff for Gaussian distribution (MeV) # # One can guess that by setting eg1 = 0, the energy distribution would become monoenergetic, in which case the e-type = 2 can completely replace the e0 command. # Prepare a PHITS input file having e-type = 2 and eg1 = 0. 
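# Before looking at that input file, here is a small numpy illustration (not part of the PHITS runs; the energy values are chosen only for illustration) of what the eg0, eg1, eg2 and eg3 parameters describe and why eg1 = 0 reduces to the monoenergetic behaviour of e0. The standard conversion FWHM = 2*sqrt(2*ln 2)*sigma is assumed.
# +
import numpy as np

def sample_etype2_like(eg0, eg1, eg2, eg3, n=100_000, seed=42):
    """Toy sampler mimicking a Gaussian energy spectrum with min/max cutoffs (illustration only)."""
    rng = np.random.default_rng(seed)
    if eg1 == 0:
        # Zero FWHM: every source particle has energy eg0, i.e. the e0 behaviour.
        return np.full(n, eg0)
    sigma = eg1 / (2 * np.sqrt(2 * np.log(2)))  # FWHM -> standard deviation
    e = rng.normal(eg0, sigma, size=n)
    return e[(e >= eg2) & (e <= eg3)]           # apply the eg2/eg3 cutoffs

energies = sample_etype2_like(eg0=35.0, eg1=2.0, eg2=30.0, eg3=40.0)
print('mean {:.3f} MeV, std {:.3f} MeV'.format(energies.mean(), energies.std()))
# -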
# Part of a PHITS input file 'etype2_fwhm0.inp' # + # I/O f = '{}/etype2_fwhm0.inp'.format(phits_path) fh = open(f) lines = [v.rstrip() for v in fh] fh.close() rows = { 'params': { 'header': '', 'range': [22, 23], }, 'source': { 'header': '', 'range': range(47, 63), }, } rows_of_int = [ 'params', 'source', ] disp_rows_of_int(lines, rows, rows_of_int) # - # Run the PHITS input with icntl = 0. # !phits ../phits/etype2_fwhm0.inp # Check the T-Cross result. t_cross_bname_full = '{}/etype2_fwhm0-cross-eng-intact'.format(phits_path) t_cross = '{}.ang'.format(t_cross_bname_full) t_cross_trimmed = '{}_trimmed.ang'.format(t_cross_bname_full) # Read in the T-Cross result file. t_cross_fh = open(t_cross) t_cross_lines = [v for v in t_cross_fh] t_cross_fh.close() # Retrieve only the data part and write them to a file. data_rows = range(251, 1252) t_cross_data = [t_cross_lines[v] for v in data_rows] t_cross_data[0] = re.sub('#(.*)', ' \\1', t_cross_data[0]) t_cross_trimmed_fh = open(t_cross_trimmed, 'w') for line in t_cross_data: t_cross_trimmed_fh.write(line) t_cross_trimmed_fh.close() df3 = pd.read_csv(t_cross_trimmed, sep=r'\s+') df3 fig, axs = plt.subplots(1, 2, figsize=(8, 3.5)) plot_flue(axs[0], df.loc[:, 'e-upper'], df.loc[:, 'electron'], loc=5) plot_flue(axs[1], df3.loc[:, 'e-upper'], df3.loc[:, 'electron'], clr='b', mrk='x', ls='None', lab='e-type = 2 with FWHM = 0', loc=5) fig.tight_layout(pad=1.5) save_curr_fig('etype2_as_e0') # The total electron fluence is tot_elec_flue = df3.electron.sum() print('{:.3f} cm^-2 source-^1'.format(tot_elec_flue)) # which is exactly the same as the monoenergetic one (see [Monoenergetic](#Monoenergetic)). # ## Summary # * In this test, the e-type = 2 subsection was compared with the e0 command. # * The obtained electron fluences were found to be equivalent. # * By setting the FWHM of Gaussian energy distribution (eg1), the e-type = 2 subsection can be used as the e0 command. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A framework for petrophysically and geologically guided geophysical inversion (PGI) # # _ and _ # # # # These notebooks were used to generate the figures shown in the article [A framework for petrophysically and geologically guided geophysical inversion](https://doi.org/10.1093/gji/ggz389). We perform PGI over various examples: a 1D MT layered-earth, a DC profile over 2 cylinders, a stitched FDEM survey for saline water intrusion # ## Contents # # There are 3 notebooks in this repository: # # - [1_MT_PGI_Sharp_Smooth.ipynb](notebooks/MT/1_MT_PGI_Sharp_Smooth.ipynb) # # - Magnetotelluric data are acquired over a layered-earth that has sharp and smooth features. The PGI algorithm is provided with the true petrophysical distribution and the goal is to use it along with the MT data to find a solution that has the desired contrast features. # # # - [2_DC_PGI_2cylinders.ipynb](notebooks/DC/2_DC_PGI_2cylinders.ipynb) # # - A DC resistivity profile is acquired over two cylinders. We illustrate the performance of this framework when no physical property mean values are available, and compared it to the result with full petrophysical information. We highlight then how geological information from borehole logs can be incorporated into this framework. 
# # # - [3_FDEM_PGI_Bookpurnong.ipynb](notebooks/FDEM/3_FDEM_PGI_Bookpurnong.ipynb) # # - This example illustrates an application of the PGI approach on a field frequency-domain EM dataset, in conjunction with a structurally constraining regularization, without using extensive geological or petrophysical information. We demonstrate how to use this framework to test hypothesis, such as a recovering a specific number of distinct units, and to build confidence, or doubts, in geological features displayed by the inversions. # # ## Usage # # To setup your software environment, we recommend you use the provided conda environment # # ``` # conda env create -f environment.yml # conda activate pgi-environment # ``` # # alternatively, you can install dependencies through pypi # # ``` # pip install -r requirements.txt # ``` # # Please [make an issue](https://github.com/simpeg-research/Astic-2019-PGI/issues/new) if you encounter any problems while trying to run the notebooks. # ## Citation # # # ., , and , 2021, Petrophysically and geologically guided multi-physics inversion using a dynamic Gaussian mixture model: Geophysical Journal International, 224(1), 40–68. https://doi.org/10.1093/gji/ggaa378 # # ., and , 2019, A framework for petrophysically and geologically guided geophysical inversion using a dynamic Gaussian mixture model prior: Geophysical Journal International, 219(3), 1989-2012. https://doi.org/10.1093/gji/ggz389 # # . and , 2018, Petrophysically guided geophysical inversion using a dynamic Gaussian mixture model prior. In SEG Technical Program Expanded Abstracts 2018 (pp. 2312-2316). https://doi.org/10.1190/segam2018-2995155.1 # # # ``` # @article{PGI_Joint, # author = { and Heagy, and Oldenburg, }, # title = "{Petrophysically and geologically guided multi-physics inversion using a dynamic Gaussian mixture model}", # journal = {Geophysical Journal International}, # volume = {224}, # number = {1}, # pages = {40-68}, # year = {2020}, # month = {08}, # issn = {0956-540X}, # doi = {10.1093/gji/ggaa378}, # url = {https://doi.org/10.1093/gji/ggaa378}, # eprint = {https://academic.oup.com/gji/article-pdf/224/1/40/34193255/ggaa378.pdf}, # } # # @article{PGI_framework, # author = { and Oldenburg, }, # title = "{A framework for petrophysically and geologically guided geophysical inversion using a dynamic Gaussian mixture model prior}", # journal = {Geophysical Journal International}, # volume = {219}, # number = {3}, # pages = {1989-2012}, # year = {2019}, # month = {08}, # issn = {0956-540X}, # doi = {10.1093/gji/ggz389}, # url = {https://doi.org/10.1093/gji/ggz389}, # eprint = {http://oup.prod.sis.lan/gji/article-pdf/219/3/1989/30144784/ggz389.pdf}, # } # # @inbook{Astic2018, # author = { and }, # title = {Petrophysically guided geophysical inversion using a dynamic Gaussian mixture model prior}, # booktitle = {SEG Technical Program Expanded Abstracts 2018}, # chapter = {}, # pages = {2312-2316}, # year = {2018}, # doi = {10.1190/segam2018-2995155.1}, # URL = {https://library.seg.org/doi/abs/10.1190/segam2018-2995155.1}, # eprint = {https://library.seg.org/doi/pdf/10.1190/segam2018-2995155.1} # } # ``` # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # --- # title: '曾经的神:Support Vector Machine' # date: 2021-07-30 # permalink: /posts/2021/07/svmyyds/ # tags: # - Machine Learning # --- # # Intro # 
I was recently asked about SVMs in a job interview; being unfamiliar with the domestic job market and not having prepared enough, I don't think I answered very well 😭. # So today, let's go through what an SVM is, how to use it, the underlying principles, and the practical tricks. # # --- # # Support Vector Machine (Classifier) # Suggested readings: # - Alpaydin: 10.3, 13.1, 13.2 # - Murphy: 172.16.17.32 # - Geron: chapter 5, appendix C # ## Alpaydin reading notes # # ### [10.3] Geometry of the Linear Discriminant # - Start from the simple two-class case, where a single discriminant function is already sufficient: # - Discriminant between two classes: # $$ # \begin{equation}\label{Discriminant_between_two_classes} # \begin{split} # g(\chi) &= g_1(\chi) - g_2(\chi)\\ # &= ((w_1)^T\chi + w_{10}) - ((w_2)^T\chi + w_{20})\\ # &= (w_1 - w_2)^T\chi + (w_{10} - w_{20})\\ # &= w^T\chi + w_0 # \end{split} # \end{equation} # $$ # - we choose # $$ # \left\{ # \begin{matrix} # C_1,\text{ }g(\chi) > 0\\ # C_2,\text{ }otherwise # \end{matrix} # \right. # $$ # # `weight vector` and `threshold`: # - together they define a hyperplane, where $w$ is the weight vector and $w_0$ is the threshold. # # ### Decision rule # - choose $C_1$ if $w^T\chi > -w_0$, and choose $C_2$ otherwise. The hyperplane divides the input space into two half-spaces: decision region $R_1$ for $C_1$ and $R_2$ for $C_2$. Any $\chi$ in $R_1$ is on the `positive` side of the hyperplane and any $\chi$ in $R_2$ is on the `negative` side. # ![IMG_B352B4820514-1.jpeg](attachment:IMG_B352B4820514-1.jpeg) # # ### Decision boundary equation # - Let's take 2 points on the decision surface; that is, $g(\chi_1) = g(\chi_2) = 0$, then # # $$ # \begin{equation} # \begin{split} # w^T\chi_1 + w_0 &= w^T\chi_2 + w_0\\ # w^T(\chi_1 - \chi_2) &= 0 # \end{split} # \end{equation} # $$ # # and we see that $w$ is *normal* to any vector lying on the hyperplane. Let's rewrite $\chi$ as # # $$ # \begin{equation} # \chi = \chi_p + r\frac{w}{||w||} # \end{equation} # $$ # # - $\chi_p$: normal projection of $\chi$ onto the hyperplane # - $r$: the distance from $\chi$ to the hyperplane # - $<0$ if $\chi$ is on the negative side # - $>0$ if $\chi$ is on the positive side # # $$ # \begin{equation} # r = \frac{g(\chi)}{||w||} # \end{equation} # $$ # # So, at the origin the distance $r_0$ will be # # $$ # \begin{equation} # r_0 = \frac{w_0}{||w||} # \end{equation} # $$ # # Thus $w_0$ determines the `location` of the hyperplane with respect to the origin, and $w$ determines its `orientation`. # ![IMG_686E4B07D60D-1.jpeg](attachment:IMG_686E4B07D60D-1.jpeg) # ### Multiple Classes # # When there are K>2 classes, there are K discriminant functions. When they are linear, we have # # $$ # \begin{equation} # g_i(\chi|w_i, w_{i0}) = w^{T}_i\chi + w_{i0} # \end{equation} # $$ # ![IMG_7F0A959AD62D-1.jpeg](attachment:IMG_7F0A959AD62D-1.jpeg) # We assume that the parameters, $w_i$, $w_{i0}$, are computed so as to have # $$ # g_i(\chi)= # \left\{ # \begin{matrix} # > 0,\text{ if }\chi \in C_i\\ # \leq 0,\text{ otherwise} # \end{matrix} # \right. # $$ # # for all $\chi$ in the training set. # # - Using such discriminant functions corresponds to *assuming* that ``all classes are linearly separable`` # # - Sometimes not all points lie cleanly in a single (+) or (-) region. The usual approach is to assign $\chi$ to the class having the highest discriminant: # - choose $C_i$ if $g_i(\chi) = \max^K_{j=1}g_j(\chi)$ # - linear classifier: recall that $\frac{|g_i(\chi)|}{||w_i||}$ is the ``distance from the input point to the hyperplane``; assuming the $w_i$ have similar norms, this assigns the point to the class (among all with $g_j(\chi) > 0$) whose hyperplane the point is most distant from. 
# - linear classifier cont: it geometrically divides the feature space into K convex decision regions $R_i$ # ### [13.1] Kernel Machines # > *Kernel Machines* are `maximum margin methods` that allow the model to be written as a sum of the influences of a subset of the training instances. These influences are given by application-specific similarity kernels, and we discuss "kernelized" classification, regression, outlier detection, and dimensionality reduction, as well as *how to choose and use kernels*. # # --- # # - Each learning algorithm has a different `inductive bias`, makes different `assumptions`, and defines a different `objective function` and thus may find a different *linear model*. # - The model `support vector machine`(SVM) was generalized under the name `kernel machine` # # It is popular because: # - It never solve a more complex problem as a `first step` before the actual problem(Vapnik 1995) # - Goal: We only need to find the boundary separating those $\chi$ that have low $p(\chi)$, that is, $\chi$ where $p(x) < \theta$, for some threshold $\theta \in (0,1)$ # ### Linear Discriminant # - Generative vs. Discriminative # * Generative model # - lkj # ### Support Vector Machine (SVM) # ### Primal & Dual Unconstrained Optimization # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + s = input() lower = upper = odd = even = str() for i in s: if i.isalpha(): if i==i.lower(): lower += i else: upper += i elif i.isnumeric(): if int(i)%2 != 0: odd += i else: even += i result = ''.join(sorted(lower)+sorted(upper)+sorted(odd)+sorted(even)) print(result) # - import string l = f'{string.ascii_lowercase}{string.ascii_uppercase}{1357902468}' print(*sorted(input(), key=l.index), sep ="") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## JSON # # JSON - Javascript Object Notation # #### Invented by when working at Yahoo in early 2000s. # # * Goal - Human Readable, Machine Parsable # # * Specification: https://www.json.org/ # JSON — short for JavaScript Object Notation — format for sharing data. # # JSON is derived from the JavaScript programming language # # Available for use by many languages including Python # # usually file extension is .json when stored # # # + # Sample JSON below from https://json.org/example.html # Question why is Syntax highlighting working properly ? 
:) # - {"widget": { "debug": "on", "window": { "title": "Sample Konfabulator Widget", "name": "main_window", "width": 500, "height": 500 }, "image": { "src": "Images/Sun.png", "name": "sun1", "hOffset": 250, "vOffset": 250, "alignment": "center" }, "text": { "data": "Click Here", "size": 36, "style": "bold", "name": "text1", "hOffset": 250, "vOffset": 100, "alignment": "center", "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" } }} # if this was string starting with { it would be our json mydata = { "firstName": "Jane", "lastName": "Doe", "hobbies": ["running", "sky diving", "dancing"], "age": 43, "children": [ { "firstName": "Alice", "age": 7 }, { "firstName": "Bob", "age": 13 } ] } type(mydata) print(mydata) mydata['children'][1]['age'] mydata['children'][-1]['age'] mydata.get('hobbies') mydata.get('hobbies')[-1],mydata['hobbies'][-1] mylist = list(range(10)) print(mylist) # The process of encoding JSON is usually called serialization. This term refers to the transformation of data into a series of bytes (hence serial) to be stored or transmitted across a network. You may also hear the term marshaling, but that’s a whole other discussion. Naturally, deserialization is the reciprocal process of decoding data that has been stored or delivered in the JSON standard. # # All we’re talking about here is reading and writing. Think of it like this: encoding is for writing data to disk, while decoding is for reading data into memory. # https://realpython.com/python-json/ mydata # simply a PYthon dictionary with some lists inside etc # we need a library for decoding and encoding json import json # first we are going to dump our data into a text file with open("data_file.json", mode="w") as write_file: json.dump(mydata, write_file) # remember that stream is closed here and file is written by now # this will be nicer with open("data_file_indented.json", mode="w") as write_file: json.dump(mydata, write_file, indent=4) with open("data_file_indented.json") as f: raw_txt = f.read() raw_txt[:150] # deserialize, decode from json string into Python Data structure my_data = json.loads(raw_txt) type(my_data) my_data.keys() my_data['children'] # more often we will load json immediately with open("data_file_indented.json") as f: my_data_2 = json.load(f) type(my_data_2) # contents are the same but two different objects my_data == my_data_2, my_data is my_data_2 my_json_string = json.dumps(my_data) # converst Python data structure into json string my_json_string[:100] type(my_json_string) json.loads(my_json_string) json.dumps(my_data) import requests # this library is not included with Python but is very popular and comes with Anaconda # pip install requests otherwise # we make a http request to a url and print the response code url = "https://my.api.mockaroo.com/ageincluded.json?key=" response = requests.get(url) print(response.status_code) # Response Code 200 is good! 
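# An alternative to checking the status code by hand (a small sketch, reusing the `response` object from the previous cell): raise_for_status() raises requests.HTTPError for 4xx/5xx responses and does nothing otherwise.
response.raise_for_status()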
data_from_json = response.json() # we do not need json.loads type(data_from_json), len(data_from_json) response.text[:50] my_mock_data = json.loads(response.text) type(my_mock_data), len(my_mock_data) data_from_json == my_mock_data, data_from_json is my_mock_data import pandas as pd df = pd.read_json(url) df.head() df.to_csv("mock_data.csv") # + # idea get average age of Japanese Men in our JSON data # - data_from_json[:5] myjson = data_from_json # just an alias japanese = [person for person in myjson if person.get('email').endswith('.jp')] # potentially person.get('email') could return None then .endswith('.jp') would fail with error japanese japanese_men = [p for p in japanese if p.get('gender') == "Male"] # potentially person.get('email') could return None then .endswith('.jp') would fail with error japanese_men uk = [person for person in myjson if person.get('email',"").endswith('.uk')] uk uk_ages = [(person.get('first_name'),int(person.get('age'))) for person in uk] uk_ages jp_ages = [(person.get('first_name'),int(person.get('age'))) for person in japanese] jp_ages with open("uk_ages.json", mode="w") as fstream: json.dump(uk_ages, fstream, indent=4) # so we lose the tuple designed when writing to JSON and back with open("uk_ages.json") as fstream: uk_data = json.load(fstream) uk_data with open("jp_ages.json", mode="w") as fstream: json.dump(jp_ages, fstream, indent=4) type(mydata) type(json_string) type(mydata) # Convert Json_string back to our Python Object my_obj = json.loads(json_string) my_obj my_obj.get('firstName') mydata newlist = json.loads('[1,3,5,"Valdis"]') newlist type(newlist) badlist = json.loads('[1,3,5,"Vald]",334342]') badlist type(json_string) # + # Avove example JSON and Python object have the same syntax but there are some differences # - # ![object](../img/object.png) # ![Array](../img/array.png) # ![Value](../img/value.png) # Simple Python objects are translated to JSON according to a fairly intuitive conversion. # # Python JSON # # dict object # # list, tuple array # # str string # # int, long, # # float number # # True true # # False false # # None null newlist = json.loads('[true,2,null, false, 555.333]') newlist # + # The first option most people want to change is whitespace. You can use the indent keyword argument to specify the indentation size for nested structures. Check out the difference for yourself by using data, which we defined above, and running the following commands in a console: json.dumps(mydata) # - # very useful for visibility! print(json.dumps(mydata, indent=4)) with open("data_file.json", "w") as write_file: json.dump(mydata, write_file, indent=4) # how to read json into our python data obj from file with open("data_file.json", "r") as read_file: data = json.load(read_file) data type(data) len(data) list(data.items()) numberedlist = list(enumerate(data.items())) numberedlist # save numberedlist with ident = 4 in file numberedlist.json with open('numberedlist.json', mode='w') as f: json.dump(numberedlist, f, indent=4) # Keep in mind that the result of this method could return any of the allowed data types from the conversion table. This is only important if you’re loading in data you haven’t seen before. In most cases, the root object will be a dict or a list. 
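# As a small illustration of that point, the Python type you get back from json.loads depends on the top-level JSON value:
# +
print(type(json.loads('{"a": 1}')))   # <class 'dict'>
print(type(json.loads('[1, 2, 3]')))  # <class 'list'>
print(type(json.loads('"hello"')))    # <class 'str'>
print(type(json.loads('3.14')))       # <class 'float'>
print(type(json.loads('true')))       # <class 'bool'>
print(type(json.loads('null')))       # <class 'NoneType'>
# -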
# If you've gotten JSON data in from another program or have otherwise obtained a string of JSON formatted data in Python, you can easily deserialize that with loads(), which naturally loads from a string: json_string = """ { "researcher": { "name": "", "species": "Betelgeusian", "relatives": [ { "name": "", "species": "Betelgeusian" } ] } } """ data = json.loads(json_string) data # get value of relative's name data['researcher'] # get value of relative's name data['researcher']['relatives'] # get value of relative's name data['researcher']['relatives'][0] # get value of relative's name data['researcher']['relatives'][0]['name'] data['researcher']['relatives'][0]['name'].split()[0] data['researcher']['relatives'][0]['name'].split()[0][:4] type(data) import json import requests # + ## Lets get some data https://jsonplaceholder.typicode.com/ # - response = requests.get("https://jsonplaceholder.typicode.com/todos") if response.status_code != 200: print("Bad Response: ", response.status_code) print(response.status_code) todos = json.loads(response.text) # can open https://jsonplaceholder.typicode.com/todos in regular browser too.. type(todos) len(todos) todos[:10] # completedworks = [el for el in todos if el['completed'] == True] completedworks = [el for el in todos if el.get('completed') == True] len(completedworks) completedworks[-10:] type(completedworks) users = {} for el in completedworks: k = el['userId'] if k in users: users[k] +=1 else: users[k] = 1 users sorted(users.items(),key = lambda el: el[1], reverse=True) from collections import Counter count = Counter([el['userId'] for el in completedworks]) count.most_common() # lets do everything at once finishedcount = Counter([el.get('userId') for el in todos if el.get('completed') == True]) finishedcount.most_common() import matplotlib.pyplot as plt plt.bar(finishedcount.keys(), finishedcount.values()) plt.show() todos[-3:] [1,2] + [3,4,6,6] todos += [{'completed':True},{'completed':True},{'completed':True},{'completed':True}] todos[-5:] # lets do everything at once finishedcount = Counter([el.get('userId') for el in todos if el.get('completed') == True]) finishedcount.most_common() myl = [('Valdis', 40), ('Alice',35), ('Bob', 23),('Carol',70)] # + # Lambda = anonymous function # - def myfun(el): return el[1] # same as myfun = lambda el: el[1] sorted(myl, key = lambda el: el[1], reverse=True) # + # Exercise find out top 3 users with most tasks completed! 
# TIPS # we need some sort of structure to store these user results before finding out top 3 # at least two good data structure choices here :) # here the simplest might actually be the best if we consider userId values # - todos[0] todos[0]['userId'] todos[0]['completed'] # Here we create a new dictionary and and count the completed works by id newdict = {} for todo in todos: if todo['completed'] == True: if todo['userId'] in newdict: newdict[todo['userId']] += 1 else: newdict[todo['userId']] = 1 newdict sorted(newdict.items()) bestworkers = sorted(newdict.items(), key=lambda el: el[1], reverse=True) bestworkers[:3] users = [ el['userId'] for el in todos] len(users),users[:15] uniqusers = set(users) uniqusers # dictionary comprehension but could live without one users = { el['userId'] : 0 for el in todos} users users.keys() users.value # + #{'completed': True, # 'id': 8, # 'title': 'quo adipisci enim quam ut ab', # 'userId': 1} # - #idiomatic for el in todos: users[el['userId']] += el['completed'] # Boolean False is 0 True is 1 obviously this might not be too readable # same as above could be useful in more complicated cases for el in todos: if el['completed'] == True: users[el['userId']] += 1 # + # there could be a one liner or a solution with from collections import Counter # - users.items() list(users.items()) userlist=list(users.items()) type(userlist[0]) # we pass a key anonymous(lambda) function sorted(userlist, key=lambda el: el[1], reverse=True)[:3] # + # lets try a simple way # - mylist=[0] mylist*=11 for el in todos: if el['completed'] == True: mylist[el['userId']] +=1 mylist mylist.index(max(mylist)) # + # kind of hard to get more values need to get tricky # - # # How about Pandas and Json ? import pandas as pd df = pd.read_json('https://jsonplaceholder.typicode.com/todos') df.head() df.to_csv('my_todos.csv') df.shape df.describe() df.describe(include=['O']) # we see that completed df.groupby(['userId']).sum() df.groupby(['userId']).sum()['completed'].plot(kind="bar") busyjson df.groupby(['userId'])['completed'].sum() # if we need a single column dataframe df.groupby(['userId'])[['completed']].sum() df.groupby(['userId'])['completed'].sum().sort_values() df.groupby(['userId'])['completed'].sum().sort_values(ascending=False) busyjson = pd.read_json('https://jsonplaceholder.typicode.com/todos').groupby(['userId'])['completed'].sum().sort_values(ascending=False).to_json() def prettyJSON(myjson): return json.dumps(json.loads(myjson), indent=4) type(busyjson) prettybusy = prettyJSON(busyjson) with open('prettybusy.json', mode='w') as f: f.write(prettybusy) # # Exercise Find Public JSON API get data and convert it into Pandas DataFrame # # ## Many possible sources # # https://github.com/toddmotto/public-apis # # ### You want the ones without authorization and WITH CORS unless you are feeling adventurous and want to try with auth # # df = pd.read_json('https://cat-fact.herokuapp.com/facts/random?animal_type=cat&amount=50') df.head() df.columns = sorted(df.columns) df.head() response = requests.get("https://cat-fact.herokuapp.com/facts/random?animal_type=cat&amount=50") if response.status_code != 200: print("Bad Response: ", response.status_code) print(response.status_code) cats = json.loads(response.text) cats[:3] response = requests.get("https://cat-fact.herokuapp.com/facts/random", params={"animal_type":"cat", "amount":20}) if response.status_code != 200: print("Bad Response: ", response.status_code) print(response.status_code) cats = json.loads(response.text) # simpler 
response.json() cats[:3] len(cats)` response.status_code cats2 = response.json() len(cats2) cats2 == cats, cats2 is cats #data are the same we just have two different copies of them df.loc[0, 'text'] # + ## For authorization you generally need some sort of token(key) # One example for zendesk API https://develop.zendesk.com/hc/en-us/community/posts/360001652447-API-auth-in-python # For an API token, append '/token' to your username and use the token as the password: ## This will not work for those without zendesk access token url = 'https://your_subdomain.zendesk.com/api/v2/users/123.json' r = requests.get(url, auth=('.com/token', 'your_token')) # For an OAuth token, set an Authorization header: bearer_token = 'Bearer ' + access_token header = {'Authorization': bearer_token} url = 'https://your_subdomain.zendesk.com/api/v2/users/123.json' r = requests.get(url, headers=header) # - def myReadJSON(url): response = requests.get(url) if response.status_code != 200: print("Bad Response: ", response.status_code) print("Status CODE", response.status_code) return json.loads(response.text) rawdrinks = myReadJSON("https://www.thecocktaildb.com/api/json/v1/1/search.php?s=margarita") type(rawdrinks) rawdrinks.keys() mydrinks = pd.DataFrame(rawdrinks['drinks']) mydrinks.head() # we can Transpose to get a sense of all columns mydrinks.head().T drinks = pd.read_json("https://www.thecocktaildb.com/api/json/v1/1/search.php?s=margarita") drinks.head() # + # requests also works with post type of requests # - url = "http://www.recipepuppy.com/api/?i=onions,garlic&q=omelet&p=3" response = requests.get(url) response.status_code o_data = response.json() type(o_data) o_recipes = o_data.get('results') type(o_recipes) o_recipes[:3] # for 100 suggestion is to use time.sleep(0.2) # it is good manners to sleep a little to avoid DDOS attack on API server import time time.sleep(0.5) # half a second delay # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- dataset = spark.read.csv("file:///home/guri/Desktop/Wine-Dataset-Classification-Using-Spark-Mlib/wine.csv",header=True,inferSchema=True) from pyspark.ml.classification import LogisticRegression dataset.printSchema() dataset.show(5) # + from pyspark.sql.functions import isnan, when, count, col dataset.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in dataset.columns]).show() # - dataset.columns # + from pyspark.ml.linalg import Vectors from pyspark.ml.feature import VectorAssembler assembler=VectorAssembler(inputCols=['Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'],outputCol='features') output_data=assembler.transform(dataset) # - final_data=output_data.select('features','Class') train,test=final_data.randomSplit([0.7,0.3]) model=LogisticRegression(labelCol='Class') model=model.fit(train) summary=model.summary summary.predictions.describe().show() # + from pyspark.ml.evaluation import BinaryClassificationEvaluator predictions=model.evaluate(test) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Fahrenheit to Celsius # =========== # + Fahrenheit = 32.0 
Celsius = (Fahrenheit - 32) * 5.0/9.0 print("Temperature: {F} Fahrenheit = {C} Celsius".format(F=Fahrenheit, C=Celsius)) # - # Celsius to Fahrenheit # =========== # + Celsius = 100.0 Fahrenheit = 9.0/5.0 * Celsius + 32 print("Temperature: {C} Celsius = {F} Fahrenheit".format(F=Fahrenheit, C=Celsius)) # - # Plot Example # ======= # %matplotlib inline import matplotlib.pyplot as plt # + def C2F(C): return 9.0/5.0 * C + 32 C2F(100) # - x = [C2F(c) for c in range(101)] x[0:10] plt.title("Temperature Conversion") plt.xlabel("Celsius") plt.ylabel("Fahrenheit") plt.plot(x) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="kwkM8UqSWBkX" outputId="ca8cc478-7ee4-4a16-9bc1-92b0727757ed" colab={"base_uri": "https://localhost:8080/", "height": 35} import tensorflow as tf tf.__version__ # + id="b6bLpp_Ce5Ok" # Import libraries import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from __future__ import absolute_import, division, print_function import tensorflow as tf from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten, LSTM, BatchNormalization from keras.preprocessing.image import ImageDataGenerator import warnings warnings.filterwarnings("ignore") # + id="wvU8p9mSXO49" outputId="488b95d1-0b57-4ae6-a3d4-d69e51ea2008" colab={"base_uri": "https://localhost:8080/", "height": 377} # !wget -O comp551w18-modified-mnist.zip https://www.dropbox.com/sh/dhu4cu8l8e32cvl/AACSbGE9X6P7an61STwwr8R0a?dl=0 # + id="Tq3COEzCXVRT" outputId="e49e4ea2-88ea-4a68-d481-432d629f2efc" colab={"base_uri": "https://localhost:8080/", "height": 85} # Run two times # !unzip comp551w18-modified-mnist.zip # + id="0JxbV7CbZ83M" outputId="77ef907d-75cb-4b41-f3f0-b3792ce057ff" colab={"base_uri": "https://localhost:8080/", "height": 51} # !ls # + id="u7w5IXiMaIcF" x = np.loadtxt("train_x.csv", delimiter=",")# Load from text y = np.loadtxt("train_y.csv", delimiter=",") x = x.reshape(-1, 64, 64) # Reshape y = y.reshape(-1, 1) # + id="-hW8k0DSaVUb" outputId="3a5293f6-f1d8-4901-d031-e68a68cc0992" colab={"base_uri": "https://localhost:8080/", "height": 1000} for i in range(5): plt.figure() plt.imshow(x[i]) # + [markdown] id="oMO7avD4aX3P" # This project is using Modified MNIST handwritten digits. The dataset contains 50000 examples for training and 10000 for testing. The digits will be size-normalized and centered in a fixed-size image(64*64 pixels) with values from 0 to 255. # + [markdown] id="XDHJ9lH4vJE5" # # 1. Data Preprocessing # + id="ZXrGKvndvH-n" # Split dataset into test and train x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=7) # + id="RqjLg05NvN17" # MNIST dataset parameters. num_classes = 10 # 0 to 9 digits num_features = 4096 # 64*64 # Convert to float32. x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32) # Flatten images to 1-D vector of 4096 features (64*264). x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features]) # Normalize images value from [0, 255] to [0, 1]. x_train, x_test = x_train / 255., x_test / 255. 
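# A quick sanity check of the preprocessing above (a small sketch; the shapes and value range are the ones described in the comments):
# +
print(x_train.shape, x_test.shape)   # (n_train, 4096) and (n_test, 4096): 64*64 pixels flattened
print(x_train.min(), x_train.max())  # pixel values now lie in [0, 1]
# -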
# + id="he-1IfWkvQQC" y_test = y_test.reshape(len(y_test), ) y_test = y_test.astype(int) # + id="7j3wnZlnvQyh" y_train = y_train.reshape(len(y_train), ) y_train = y_train.astype(int) # + [markdown] id="y4gpuaaubEsQ" # # 1. Logistic Regression # # To feed in the LR model, each image will be converted to float 32, normalized to [0,1] and flattened to a 1-D array of 4096 features(64*64). # + id="p1TrgOsAa2T3" # Training parameters. learning_rate = 0.01 training_steps = 500 batch_size = 128 display_step = 50 # + id="2ME3mk7AcJ08" # Use tf.data API to shuffle and batch data. train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1) # + id="nxDDBNBVcOAs" # Weight of shape [4096, 10], the 64*64 image features, and total number of classes. W = tf.Variable(tf.ones([num_features, num_classes]), name="weight") # Bias of shape [10], the total number of classes. b = tf.Variable(tf.zeros([num_classes]), name="bias") # + id="AuMCJcJNcVcp" # Logistic regression (Wx + b). def logistic_regression(x): # Apply softmax to normalize the logits to a probability distribution. return tf.nn.softmax(tf.matmul(x, W) + b) # Cross-Entropy loss function. def cross_entropy(y_pred, y_true): # Encode label to a one hot vector. y_true = tf.one_hot(y_true, depth=num_classes) # Clip prediction values to avoid log(0) error. y_pred = tf.clip_by_value(y_pred, 1e-9, 1.) # Compute cross-entropy. return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred),1)) # Accuracy metric. def accuracy(y_pred, y_true): # Predicted class is the index of highest score in prediction vector (i.e. argmax). correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Stochastic gradient descent optimizer. optimizer = tf.optimizers.SGD(learning_rate) # + id="LlHckfM-cmrV" # Optimization process. def run_optimization(x, y): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: pred = logistic_regression(x) loss = cross_entropy(pred, y) # Compute gradients. gradients = g.gradient(loss, [W, b]) # Update W and b following gradients. optimizer.apply_gradients(zip(gradients, [W, b])) # + id="VYAqJBU6crA4" outputId="9334bec0-2548-46b8-807b-dbf5a5a85631" colab={"base_uri": "https://localhost:8080/", "height": 187} # Run training for the given number of steps. for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1): # Run the optimization to update W and b values. run_optimization(batch_x, batch_y) if step % display_step == 0: pred = logistic_regression(batch_x) loss = cross_entropy(pred, batch_y) acc = accuracy(pred, batch_y) print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc)) # + id="ahPWjoxgfZdS" outputId="b85d4384-c1bd-4a2d-c94b-33920b326761" colab={"base_uri": "https://localhost:8080/", "height": 34} # Test model on validation set. pred = logistic_regression(x_test) print("Test Accuracy: %f" % accuracy(pred, y_test)) # + id="JTJACcN_feQJ" outputId="4d69df2e-aed9-450c-9088-8d8c4b92831d" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Predict 5 images from validation set. n_images = 5 test_images = x_test[:n_images] predictions = logistic_regression(test_images) # Display image and model prediction. for i in range(n_images): plt.imshow(np.reshape(test_images[i], [64, 64])) plt.show() print("Model prediction: %i" % np.argmax(predictions.numpy()[i])) # + [markdown] id="mln_TS_ygGgA" # # 2. 
Convolutional Neural Networks # # Unlike Logistic Regression and Recurrent Neural Networks, an advantage of CNN is that the input data doesn't need to be flattened, thus we will reshape the data to its origial shape to fit the model. # + id="ortym2B6f0f9" # Modified MNIST dataset parameters. num_classes = 10 # total classes (0-9 digits). # Training parameters. learning_rate = 0.01 training_steps = 500 batch_size = 128 display_step = 50 dropout = 0.7 # Network parameters. num_input = 4096 conv1_filters = 16 # number of filters for 1st conv layer. conv2_filters = 32 # number of filters for 2nd conv layer. conv3_filters = 128 # number of filters for 3nd conv layer. fc1_units = 1024 # number of neurons for 1st fully-connected layer. # + id="oZX_6Yg9hz1R" outputId="95f8af98-33d8-41ae-af22-6504b63e98e8" colab={"base_uri": "https://localhost:8080/", "height": 34} (x_train.shape,x_test.shape,y_train.shape,y_test.shape) # + id="a2tso1o8h6fY" # Create some wrappers for simplicity. def conv2d(x, W, b, strides=1): # Conv2D wrapper, with bias and relu activation. x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) def maxpool2d(x, k=2): # MaxPool2D wrapper. return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME') # + id="nrzqtNJeh_QE" # Store layers weight & bias # A random value generator to initialize weights. random_normal = tf.initializers.RandomNormal() weights = { # Conv Layer 1: 5x5 conv, 1 input, 16 filters 'wc1': tf.Variable(random_normal([5, 5, 1, conv1_filters])), # Conv Layer 2: 3x3 conv, 16 inputs, 32 filters. 'wc2': tf.Variable(random_normal([3, 3, conv1_filters, conv2_filters])), # Conv Layer 3: 3x3 conv, 32 inputs, 128 filters 'wc3': tf.Variable(random_normal([3, 3, conv2_filters, conv3_filters])), # FC Layer 1: 8*8*128 inputs, 1024 units. 'wd1': tf.Variable(random_normal([8*8*128, fc1_units])), # FC Out Layer: 1024 inputs, 10 units (total number of classes) 'out': tf.Variable(random_normal([fc1_units, num_classes])) } biases = { 'bc1': tf.Variable(tf.zeros([conv1_filters])), 'bc2': tf.Variable(tf.zeros([conv2_filters])), 'bc3': tf.Variable(tf.zeros([conv3_filters])), 'bd1': tf.Variable(tf.zeros([fc1_units])), 'out': tf.Variable(tf.zeros([num_classes])) } # + id="JnCo3n1uiNcw" # Create model def conv_net(x): # Input shape: [-1, 64, 64, 1]. A batch of 64x64x1 images. x = tf.reshape(x, [-1, 64, 64, 1]) # Convolution Layer. Output shape: [-1, 64, 64, 16]. conv1 = conv2d(x, weights['wc1'], biases['bc1']) # Max Pooling (down-sampling). Output shape: [-1, 32, 32, 16]. conv1 = maxpool2d(conv1, k=2) # Convolution Layer. Output shape: [-1, 32, 32, 32]. conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) # Max Pooling (down-sampling). Output shape: [-1, 16, 16, 32]. conv2 = maxpool2d(conv2, k=2) # Convolution Layer. Output shape: [-1, 16, 16, 128]. conv3 = conv2d(conv2, weights['wc3'], biases['bc3']) # Max Pooling (down-sampling). Output shape: [-1, 8, 8, 128]. conv3 = maxpool2d(conv3, k=2) # Reshape conv3 output to fit fully connected layer input, Output shape: [-1, 8*8*128]. fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]]) # Fully connected layer, Output shape: [-1, 1024]. fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) # Apply ReLU to fc1 output for non-linearity. fc1 = tf.nn.relu(fc1) # Apply Dropout fc1 = tf.nn.dropout(fc1, dropout) # Fully connected layer, Output shape: [-1, 10]. 
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    # Apply softmax to normalize the logits to a probability distribution.
    return tf.nn.softmax(out)

# + id="QcWsFy_Xivfy"
# Cross-Entropy loss function.
def cross_entropy(y_pred, y_true):
    # Encode label to a one hot vector.
    y_true = tf.one_hot(y_true, depth=num_classes)
    # Clip prediction values to avoid log(0) error.
    y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
    # Compute cross-entropy.
    return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))

# Accuracy metric.
def accuracy(y_pred, y_true):
    # Predicted class is the index of highest score in prediction vector (i.e. argmax).
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)

# ADAM optimizer.
optimizer = tf.optimizers.Adam(learning_rate)

# + id="BTDL_vMkizAq"
# Optimization process.
def run_optimization(x, y):
    # Wrap computation inside a GradientTape for automatic differentiation.
    with tf.GradientTape() as g:
        pred = conv_net(x)
        loss = cross_entropy(pred, y)
    # Variables to update, i.e. trainable variables.
    trainable_variables = list(weights.values()) + list(biases.values())
    # Compute gradients.
    gradients = g.gradient(loss, trainable_variables)
    # Update W and b following gradients.
    optimizer.apply_gradients(zip(gradients, trainable_variables))

# + id="kJ6LyIOri1cb" outputId="c41c29c1-1f87-4c2b-ddb8-66c63cd08b0b" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Run training for the given number of steps.
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    # Run the optimization to update W and b values.
    run_optimization(batch_x, batch_y)
    if step % display_step == 0:
        pred = conv_net(batch_x)
        loss = cross_entropy(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))

# + id="ezmU1tTei4WH" outputId="af33bd7b-fa88-4d55-b190-ac27aa258041" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Test model on validation set.
pred = conv_net(x_test)
print("Test Accuracy: %f" % accuracy(pred, y_test))

# + [markdown] id="qcPkKoc9xNkF"
# # 3. Gated CNN
# The convolutional layers in the gated CNN use a gating mechanism that allows the network to control what information is propagated through the hierarchy of layers.

# + id="dt9bys4yzAXx"
# Create some wrappers for simplicity.
def gconv2d(x, W, b, V, c, num, strides=1):
    # Conv2D wrapper, with bias and gates (a gated linear unit): the first convolution
    # carries the signal, the second, passed through a sigmoid, acts as the gate.
    A = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    B = tf.nn.conv2d(x, V, strides=[1, strides, strides, 1], padding='SAME')
    A = tf.nn.bias_add(A, b)
    B = tf.nn.bias_add(B, c)
    return tf.math.multiply(A, tf.math.sigmoid(B))

# + id="B6Pi-ApFzDvL"
# Store layers weight & bias

# A random value generator to initialize weights.
random_normal = tf.initializers.RandomNormal()

weights = {
    # Conv Layer 1: 5x5 conv, 1 input, 16 filters
    'wc1': tf.Variable(random_normal([5, 5, 1, conv1_filters])),
    'wv1': tf.Variable(random_normal([5, 5, 1, conv1_filters])),
    'c1': tf.Variable(5.),
    # Conv Layer 2: 3x3 conv, 16 inputs, 32 filters.
    'wc2': tf.Variable(random_normal([3, 3, conv1_filters, conv2_filters])),
    'wv2': tf.Variable(random_normal([3, 3, conv1_filters, conv2_filters])),
    'c2': tf.Variable(3.),
    # Conv Layer 3: 3x3 conv, 32 inputs, 128 filters
    'wc3': tf.Variable(random_normal([3, 3, conv2_filters, conv3_filters])),
    'wv3': tf.Variable(random_normal([3, 3, conv2_filters, conv3_filters])),
    'c3': tf.Variable(3.),
    # FC Layer 1: 8*8*128 inputs, 1024 units.
'wd1': tf.Variable(random_normal([8*8*128, fc1_units])), # FC Out Layer: 1024 inputs, 10 units (total number of classes) 'out': tf.Variable(random_normal([fc1_units, num_classes])) } biases = { 'bc1': tf.Variable(tf.zeros([conv1_filters])), 'bv1': tf.Variable(tf.zeros([conv1_filters])), 'bc2': tf.Variable(tf.zeros([conv2_filters])), 'bv2': tf.Variable(tf.zeros([conv2_filters])), 'bc3': tf.Variable(tf.zeros([conv3_filters])), 'bv3': tf.Variable(tf.zeros([conv3_filters])), 'bd1': tf.Variable(tf.zeros([fc1_units])), 'out': tf.Variable(tf.zeros([num_classes])) } # + id="R4cpoX9rzKSF" # Create model def gconv_net(x): # Input shape: [-1, 64, 64, 1]. A batch of 64x64x1 images. x = tf.reshape(x, [-1, 64, 64, 1]) # Convolution Layer. Output shape: [-1, 64, 64, 16]. conv1 = gconv2d(x, weights['wc1'], biases['bc1'], weights['wv1'], biases['bv1'], weights['c1']) #conv1 = conv2d(x, weights['wc1'], biases['bc1']) # Max Pooling (down-sampling). Output shape: [-1, 32, 32, 16]. conv1 = maxpool2d(conv1, k=2) # Convolution Layer. Output shape: [-1, 32, 32, 32]. conv2 = gconv2d(conv1, weights['wc2'], biases['bc2'], weights['wv2'], biases['bv2'], weights['c2']) #conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) # Max Pooling (down-sampling). Output shape: [-1, 16, 16, 32]. conv2 = maxpool2d(conv2, k=2) # Convolution Layer. Output shape: [-1, 16, 16, 128]. conv3 = gconv2d(conv2, weights['wc3'], biases['bc3'], weights['wv3'], biases['bv3'], weights['c3']) #conv3 = conv2d(conv2, weights['wc3'], biases['bc3']) # Max Pooling (down-sampling). Output shape: [-1, 8, 8, 128]. conv3 = maxpool2d(conv3, k=2) # Reshape conv3 output to fit fully connected layer input, Output shape: [-1, 8*8*128]. fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]]) # Fully connected layer, Output shape: [-1, 1024]. fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) # Apply ReLU to fc1 output for non-linearity. fc1 = tf.nn.relu(fc1) # Apply Dropout fc1 = tf.nn.dropout(fc1, dropout) # Fully connected layer, Output shape: [-1, 10]. out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) # Apply softmax to normalize the logits to a probability distribution. return tf.nn.softmax(out) # + id="pSflc5DxzPh-" # Cross-Entropy loss function. def cross_entropy(y_pred, y_true): # Encode label to a one hot vector. y_true = tf.one_hot(y_true, depth=num_classes) # Clip prediction values to avoid log(0) error. y_pred = tf.clip_by_value(y_pred, 1e-9, 1.) # Compute cross-entropy. return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred))) # Accuracy metric. def accuracy(y_pred, y_true): # Predicted class is the index of highest score in prediction vector (i.e. argmax). correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1) # ADAM optimizer. optimizer = tf.optimizers.Adam(learning_rate) # + id="IY4D3F-szSnm" # Optimization process. def run_optimization(x, y): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: pred = gconv_net(x) loss = cross_entropy(pred, y) # Variables to update, i.e. trainable variables. trainable_variables = list(weights.values()) + list(biases.values()) # Compute gradients. gradients = g.gradient(loss, trainable_variables) # Update W and b following gradients. 
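# Note: the scalar gate parameters 'c1', 'c2' and 'c3' are passed to gconv2d only as its
# unused `num` argument, so g.gradient returns None for them; the generator expression
# below filters those entries out before applying the update.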
optimizer.apply_gradients((grad, var) for (grad, var) in zip(gradients, trainable_variables) if grad is not None) # + id="7k5jMt5IzU8A" outputId="e76af344-1202-4144-ccae-aa7ca4307ddc" colab={"base_uri": "https://localhost:8080/", "height": 187} # Run training for the given number of steps. for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1): # Run the optimization to update W and b values. run_optimization(batch_x, batch_y) if step % display_step == 0: pred = gconv_net(batch_x) loss = cross_entropy(pred, batch_y) acc = accuracy(pred, batch_y) print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc)) # + [markdown] id="JLMtrFYxYClv" # # 4. Recurrent Neural Networks_LSTM # # To classify images using a recurrent neural network, we consider every image row as a sequence of pixels. Because Modified MNIST image shape is 64*64px, we will then handle 64 sequences of 64 timesteps for every sample. # + id="q1iYww8G9nSf" y_train_r = np_utils.to_categorical(y_train, num_classes) y_test_r = np_utils.to_categorical(y_test, num_classes) # + id="zV4E7yNL-Apw" outputId="28a3a218-759a-4f3e-a832-2ce308c20ba1" colab={"base_uri": "https://localhost:8080/", "height": 34} (x_train.shape,x_test.shape,y_train_r.shape,y_test_r.shape) # + id="iXy9l-88Xyq7" outputId="28b6dc37-55b9-4258-aff2-60989618b24e" colab={"base_uri": "https://localhost:8080/", "height": 221} # Hyper parameters batch_size = 128 nb_epoch = 30 # Parameters for MNIST dataset img_rows, img_cols = 64, 64 nb_classes = 10 # Parameters for LSTM network nb_lstm_outputs = 30 nb_time_steps = img_rows dim_input_vector = img_cols input_shape = (64, 64) x_train=x_train.reshape(x_train.shape[0],64,64) x_test=x_test.reshape(x_test.shape[0],64,64) model_LSTM = Sequential() model_LSTM.add(LSTM(nb_lstm_outputs, input_shape=input_shape)) model_LSTM.add(Dense(10, activation='softmax')) model_LSTM.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model_LSTM.summary() # + id="Vn_ouHEpX60p" outputId="22ac8843-0c9c-4c57-b476-f2f6380a8651" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Train the model history = model_LSTM.fit(x_train, y_train_r, epochs=nb_epoch, batch_size=batch_size, shuffle=True, validation_data=(x_test, y_test_r), verbose=1) # + [markdown] id="n8vhbTos-yNB" # # 5. CNN_VGG # Apart from a vanilla CNN model, I also used a more sophisticated model-VGG and got a decent performance. 
# + id="lSEa6ViJ_FAh" x_train, x_test = x_train.reshape([-1,64,64,1]), x_test.reshape([-1,64,64,1]) # + id="oJ_8BWJm-oxD" # Data augmentation input_shape = (64,64,1) datagen = ImageDataGenerator( rotation_range=20, width_shift_range=0.1, shear_range=0.2, height_shift_range=0.1, zoom_range=0.2 ) datagen.fit(x_train) # + id="AD2Sq-b7_vZn" # Build model def VGG_modified(input_shape, opt = tf.keras.optimizers.Adam(amsgrad=True)): model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), padding="same", activation='relu', input_shape=input_shape, data_format="channels_last")) model.add(Conv2D(32, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(32, kernel_size=(3, 3), padding="same", activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(64, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(64, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(64, kernel_size=(3, 3), padding="same", activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(128, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(128, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(128, kernel_size=(3, 3), padding="same", activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(256, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(256, kernel_size=(3, 3), padding="same", activation='relu')) model.add(Conv2D(256, kernel_size=(3, 3), padding="same", activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(512, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) #sgd=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model # + id="gb6mo9RJ_vzV" outputId="c2aa48f5-0cfb-490f-b8ba-dc5781019d8e" colab={"base_uri": "https://localhost:8080/", "height": 598} model = VGG_modified((64,64,1)) history = model.fit_generator(datagen.flow(x_train, y_train_r, batch_size=64), validation_data=(x_test, y_test_r), epochs = 15) # + id="PM9v7L_vlfS5" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from datetime import datetime from sklearn.ensemble import IsolationForest from sklearn.decomposition import PCA import matplotlib.pyplot as plt from plotly import plotly as py from plotly.offline import init_notebook_mode, iplot import plotly.graph_objs as go init_notebook_mode(connected=True) from sqlalchemy import create_engine import warnings warnings.filterwarnings('ignore') # + from pricing.service.scoring.lscore import LScoring from sqlalchemy import create_engine import numpy as np import pandas as pd import requests from time import sleep from datetime import datetime from conector.mysql import mysql_engine, CaptalysDBContext from dateutil.relativedelta import relativedelta class DScoring(object): def __init__(self, cnpj, produto, socios=False, baseline_type = 'lscore'): self.cnpj = cnpj self.doctype = 'cpf' if 
len(self.cnpj)<12 else 'cnpj' self.baseline_type = baseline_type self.score_socios = socios self.produto = produto self.lscore = None self.baseline = 1000 self.fator_elegibilidade = 2 self.faturamento_medio = None self.calibracao_segmento = None self.consulta = None self.estados_dividas = None self.dispersao_divida = None self.idade_empresa = None self.metricas = None def score_mestre(self): ls = LScoring(cnpj=self.cnpj, produto=self.produto) df = pd.DataFrame(ls.params) periodo = len(df) datas = pd.date_range(end=datetime.now().date().replace(day=1) - relativedelta(months=1), periods=periodo, freq='MS') datas = [el.date() for el in datas] df['data'] = datas params = df.to_dict("records") ls.params = params lscore = ls.calcula().get('score') fat_medio = ls.faturamentos['valor'].mean() self.lscore = lscore self.faturamento_medio = fat_medio return @property def campos_divida(self): return { "restricoes" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"], "protestos" : ["data_anotacao", "natureza", "sub_judice_descricao", "valor"], "pendencias" : ["data_ocorrencia", "modalidade", "natureza", "valor"], "processos" : ["data_ocorrencia", "descricao_natureza", "natureza", "valor"], "restricoes_financeiras" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"] } @property def campos_rename(self): return { "processos" : {"descricao_natureza" : "modalidade_natureza"}, "pendencias" : {"modalidade" : "modalidade_natureza"}, "protestos" : {'sub_judice_descricao' : "modalidade_natureza", "data_anotacao" : "data_ocorrencia"} } @property def segmentos(self): return {"credito" : ['EMPRESCONTA', 'EMPRESTIMO', 'CREDCARTAO', 'FINANCIAMENT', 'CREDITOEFINANCIAMENTO-FINANC'], "processos" : ['EXCJUDTRAB', 'FISCALESTADUAL', 'EXECUCAO', 'FISCALFEDERAL', 'FISCALMUNICIPAL','EXECUCAO-JE', 'BUSCAEAPREENSAO'], "infra" : ['FATAGUA', 'TELEFFX', 'TELEFFIXA', 'TELEFMOVEL', 'CONDOMINIO', 'ENERGIAELET', 'ALUGUEL', 'SERVTELEFON'] } @staticmethod def get_numero_consulta(doc): engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@:23306/varejo") con = engine.connect() query = "select data_ref, numero_consulta from consultas_idwall_operacoes where cnpj_cpf='{}'".format(doc) df = pd.read_sql(query, con) numero = df[df['data_ref']==df['data_ref'].max()]["numero_consulta"].iloc[0] con.close() # self.numero_consulta = numero return numero @staticmethod def get_details(numero): URL = "https://api-v2.idwall.co/relatorios" authorization = "b3--4-ade8-78a" url_details = URL + "/{}".format(numero) + "/dados" while True: dets = requests.get(url_details, headers={"authorization": authorization}) djson = dets.json() sleep(1) if djson['result']['status'] == "CONCLUIDO": break return dets.json() @staticmethod def formata_dados(df): df['modalidade_natureza'] = df.apply(lambda x : x['modalidade_natureza'].replace(" ", "") if isinstance(x['modalidade_natureza'], str) else "OUTROS", axis=1) df['valor'] = df.apply(lambda x : x['valor'].split("R$ ")[1].replace(",", "."), axis=1) df["valor"] = df.apply(lambda x : float(x["valor"]), axis=1) return df def get_infos_dividas(self, js, tp_pendencia): res = js.get("result").get(tp_pendencia) if not res is None: df = pd.DataFrame(res.get('itens')) cols = self.campos_divida.get(tp_pendencia) if "uf" in list(df.columns): cols = cols + ["uf"] df = df[cols].copy() else: df = df[cols] df["uf"] = None rename = self.campos_rename.get(tp_pendencia) if not rename is None: df.rename(columns = rename, inplace=True) df["tipo"] = tp_pendencia return df return None def 
gera_dados(self, doc): numero = self.get_numero_consulta(doc) js = self.get_details(numero) if len(doc) > 11: self.consulta = js fr = [] lista_pendencias = ["restricoes", "processos", "protestos", "pendencias", "restricoes_financeiras"] for el in lista_pendencias: res = self.get_infos_dividas(js, el) if not res is None: fr.append(res) if len(fr) == 0: return pd.DataFrame() df = pd.concat(fr) df = self.formata_dados(df) if len(doc) > 11: self.estados_dividas = df["uf"].unique().tolist() return df def calcula_dispersao_divida(self): uf_cnpj = self.consulta.get("result").get("cnpj").get("localizacao").get("estado") lista_dispersao = [el for el in self.estados_dividas if el!= uf_cnpj] print(lista_dispersao) dispersao = len(lista_dispersao)/4 self.dispersao_divida = dispersao return def get_idade(self): data_abertura = self.consulta.get("result").get("cnpj").get("data_abertura") data_abertura = data_abertura.replace("/", "-") data = datetime.strptime(data_abertura, "%d-%m-%Y").date() idade = ((datetime.now().date() - data).days/366) self.idade_empresa = np.around(idade, 2) return def atribui_segmento(self, df): df['segmento'] = df.apply(lambda x : 'processos' if x['tipo']=='processos' else('credito' if x['modalidade_natureza'] in self.segmentos.get("credito") else ('infra' if x['modalidade_natureza'] in self.segmentos.get("infra") else "outros")), axis=1) return df @staticmethod def calcula_probabilidade(df): dt = df.groupby("segmento").count().reset_index()[["segmento", "valor"]] dt.columns = ["segmento", "ocorrencias"] dt["probabilidade"] = dt["ocorrencias"]/dt["ocorrencias"].sum() return dt @staticmethod def calcula_composicao(df): dt = df.groupby("segmento").sum().reset_index() dt.columns = ["segmento", "valor_divida"] dt["composicao"] = dt["valor_divida"]/dt["valor_divida"].sum() return dt def calcula_pi(self, dfcalc): dfcalc['pi'] = dfcalc['valor_divida']/dfcalc['fat_medio'] dfcalc['pi'] = (1/self.fator_elegibilidade)*dfcalc['pi'] return dfcalc @property def peso_segmento(self): return { "credito" : 4, "processos" : 3, "infra" : 2, "outros" : 1 } @property def fator_segmento(self): return {"credito" : 1, "processos" : 0.8, "infra" : 0.6, "outros" : 0.4} def lambda_(self, c, p, segmento): f = self.fator_segmento.get(segmento) return c*p*f def calcula_lambda(self, dfcalc): dfcalc["lambda"] = dfcalc.apply(lambda x : self.lambda_(x["composicao"], x["pi"], x["segmento"]), axis=1) return dfcalc @staticmethod def calcula_risco(dfcalc): dfcalc["risco"] = dfcalc["probabilidade"]*dfcalc["lambda"] return dfcalc @staticmethod def d_score(risco_, score_limite): return -score_limite*risco_ + score_limite def calcula_dscore(self, dfcalc): if self.baseline_type == 'lscore': score_limite = 1*self.lscore else: score_limite = self.baseline dfcalc["dscore"] = dfcalc.apply(lambda x : self.d_score(x["risco"], score_limite) if x["pi"] <=1 else 0, axis=1) return dfcalc def get_metricas(self, dfcalc): segmentos = ["credito", "processos", "infra", "outros"] final = {} for el in segmentos: dt = dfcalc[dfcalc["segmento"]==el] res = {} if dt.empty: res["num_ocorr"] = 0 res["comp"] = 0 res["risco"] = 0 final[el] = res else: res["num_ocorr"] = dt["ocorrencias"].iloc[0] res["comp"] = dt['composicao'].iloc[0] res["risco"] = dt["risco"].iloc[0] final[el] = res self.metricas = final return def update_dataset(self): df_metricas = pd.DataFrame() df_metricas["cnpj"] = [self.cnpj] df_metricas["produto"] = [self.produto] df_metricas["data_metricas"] = [datetime.now().date()] df_metricas["data_modelo"] = [None] 
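# Flatten the per-segment metrics computed in get_metricas (credito / processos / infra / outros)
# into one column per metric, one row per CNPJ, for the outlier_detection table.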
df_metricas["num_ocorr_cr"] = [self.metricas.get('credito').get('num_ocorr')] df_metricas["num_ocorr_proc"] = [self.metricas.get('processos').get('num_ocorr')] df_metricas["num_ocorr_infra"] = [self.metricas.get('infra').get('num_ocorr')] df_metricas["num_ocorr_out"] = [self.metricas.get('outros').get('num_ocorr')] df_metricas["comp_cr"] = [self.metricas.get('credito').get('comp')] df_metricas["comp_proc"] = [self.metricas.get('processos').get('comp')] df_metricas["comp_infra"] = [self.metricas.get('infra').get('comp')] df_metricas["comp_out"] = [self.metricas.get('outros').get('comp')] df_metricas["risco_cr"] = [self.metricas.get('credito').get('risco')] df_metricas["risco_proc"] = [self.metricas.get('processos').get('risco')] df_metricas["risco_infra"] = [self.metricas.get('infra').get('risco')] df_metricas["risco_out"] = [self.metricas.get('outros').get('risco')] df_metricas["idade_empresa"] = [self.idade_empresa] df_metricas["dispersao_divida"] = [self.dispersao_divida] df_metricas["outlier"] = [None] engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/credit_model") con = engine.connect() con.execute("delete from outlier_detection where cnpj='{}'".format(self.cnpj)) df_metricas.to_sql('outlier_detection', schema='credit_model', con=con, if_exists='append', index=False) con.close() return def get_socios(self): schema = 'credito-digital' if self.produto != 'moip' else self.produto engine = mysql_engine(schema) query = "select cpf from tb_Socio where cnpj='{}'".format(self.cnpj) with CaptalysDBContext(engine) as db: res = db.session.execute(query).fetchall() lista_socios = [el[0] for el in res] self.lista_socios = lista_socios return lista_socios def calcula_socios(self): lista_socios = self.get_socios() resp = [] for el in lista_socios: _df = self.gera_dados(el) if not _df.empty: resp.append(_df) if len(resp) == 0: return np.nan df = pd.concat(resp) df = self.atribui_segmento(df) dfp = self.calcula_probabilidade(df) dfc = self.calcula_composicao(df) dfcalc = dfp.merge(dfc, left_on="segmento", right_on="segmento", how='left') dfcalc['fat_medio'] = self.faturamento_medio dfcalc = self.calcula_pi(dfcalc) dfcalc = self.calcula_lambda(dfcalc) dfcalc = self.calcula_risco(dfcalc) dfcalc = self.calcula_dscore(dfcalc) self.get_metricas(dfcalc) dscore = dfcalc['dscore'].mean() lista_segmentos = dfcalc["segmento"].tolist() lista_dscore = dfcalc["dscore"].tolist() lista_dscore = [int(el) for el in lista_dscore] res = dict(zip(lista_segmentos, lista_dscore)) res["lscore"] = int(self.lscore) res['dscore'] = int(dscore) res['score'] = int((self.lscore + dscore)/2) return res, dfcalc def calcula(self, update=True): self.score_mestre() doc = self.cnpj df = self.gera_dados(doc) if df.empty: return {}, None self.calcula_dispersao_divida() self.get_idade() df = self.atribui_segmento(df) dfp = self.calcula_probabilidade(df) dfc = self.calcula_composicao(df) dfcalc = dfp.merge(dfc, left_on="segmento", right_on="segmento", how='left') dfcalc['fat_medio'] = self.faturamento_medio dfcalc = self.calcula_pi(dfcalc) dfcalc = self.calcula_lambda(dfcalc) dfcalc = self.calcula_risco(dfcalc) dfcalc = self.calcula_dscore(dfcalc) self.get_metricas(dfcalc) if update: self.update_dataset() dscore = dfcalc['dscore'].mean() lista_segmentos = dfcalc["segmento"].tolist() lista_dscore = dfcalc["dscore"].tolist() lista_dscore = [int(el) for el in lista_dscore] res = dict(zip(lista_segmentos, lista_dscore)) res["lscore"] = int(self.lscore) res['dscore'] = 
int(dscore) res['score'] = int((self.lscore + dscore)/2) dfcalc["baseline_type"] = self.baseline_type dfcalc["baseline"] = self.lscore if self.baseline_type == 'lscore' else 1000 dfcalc["fator_elegibilidade"] = self.fator_elegibilidade dfcalc["cnpj"] = self.cnpj dfcalc["produto"] = self.produto dfcalc["data_ref"] = datetime.now().date() return res, dfcalc # if __name__ == '__main__': # ds = DScoring(cnpj='14534748000189', produto='tomatico') # res, dfcalc = ds.calcula(update=False) # print(dfcalc.columns) # print(res) # print(dfcalc) # - #escarlate cnpj_exemplo = '14534748000189' ds = DScoring(cnpj=cnpj_exemplo, produto='tomatico') res, dfcalc = ds.calcula() res dfcalc total_dividas = dfcalc['valor_divida'].sum() total_dividas/47309.444167 df_modelo[df_modelo["cnpj"]=='21666147000195'] lista_cobranca = ["14534748000189", "21666147000195", "22861508000117", "13339876000109", "25401132000156", "26998230000185", "14550173000198", "04015113000111", "20694842000106"] dfcobranca = df_modelo[df_modelo['cnpj'].isin(lista_cobranca)] dfcobranca engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/credit_model") con = engine.connect() dfscore = pd.read_sql("select * from debt_score") def getData(): engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/credit_model") con = engine.connect() df = pd.read_sql("select * from outlier_detection", con) con.close() df_modelo = df[["cnpj", "num_ocorr_cr", "num_ocorr_proc", "num_ocorr_infra", "num_ocorr_out", "comp_cr", "comp_proc", "comp_infra", "comp_out", "risco_cr", "risco_proc", "risco_infra", "risco_out", "idade_empresa", "dispersao_divida"]] df_modelo.dropna(inplace=True) # df_modelo = df_modelo[df_modelo['cnpj'].isin[(['00000000000191', '60701190000104'])]] df_modelo["dispersao"] = df_modelo["dispersao_divida"]*(4/27) df_modelo.drop(columns=['dispersao_divida'], axis=1, inplace=True) df_modelo.index = df_modelo.cnpj df_modelo = df_modelo.drop("cnpj", 1) df_modelo.fillna(0, inplace=True) return df_modelo, df[["cnpj", "produto"]] df_modelo, df = getData() df_modelo.shape df.head() df_modelo.head() df_modelo["dispersao"].max() # + # outlier detection # + X = df_modelo.copy() outlier_detect = IsolationForest(n_estimators=100, max_samples='auto', contamination=.5, max_features=X.shape[1], random_state=1) outlier_detect.fit(X) outliers_predicted = outlier_detect.predict(X) df_modelo["outlier"] = outliers_predicted df_modelo[df_modelo['outlier']==-1].head() # - df_modelo.reset_index(inplace=True) df_modelo.shape res = df_modelo.merge(df, left_on=['cnpj'], right_on='cnpj', how='left') res = df_modelo.merge(df[["cnpj", "produto"]], left_on=['cnpj'], right_on='cnpj', how='left') res[res['cnpj']=='93379337000132'] res[res['outlier']==1].describe() res[res['outlier']==1]['comp_cr'].unique().tolist() res[res['outlier']==1]['comp_proc'].unique().tolist() res[res['outlier']==1]["comp_infra"].unique().tolist() res[res['outlier']==-1].describe() res[res['cnpj']=='14534748000189'] dfout = res[res['outlier']==-1] dfout[dfout['cnpj']=='00226038000187'] dfout[dfout['comp_proc']==1] dfout[dfout['num_ocorr_proc']==99] dfout['num_ocorr_proc'].max() import matplotlib.pyplot as plt plt.hist(x=df_modelo["num_ocorr_proc"], bins=20) plt.hist(x=df_modelo["num_ocorr_cr"], bins=20) plt.hist(x=df_modelo["num_ocorr_infra"], bins=20) plt.hist(x=df_modelo["num_ocorr_out"], bins=20) plt.hist(x=df_modelo["comp_cr"], bins=20) plt.hist(x=df_modelo["comp_proc"], 
bins=20) plt.hist(x=df_modelo["comp_infra"], bins=20) plt.hist(x=dfout["comp_infra"], bins=20) plt.hist(x=df_modelo["comp_out"], bins=20) plt.hist(x=df_modelo["risco_cr"], bins=20) plt.hist(x=dfout["risco_cr"], bins=20) plt.hist(x=df_modelo["risco_proc"], bins=20) plt.hist(x=df_modelo["risco_infra"], bins=20) plt.hist(x=dfout["risco_infra"], bins=20) plt.hist(x=df_modelo["risco_out"], bins=20) plt.hist(df_modelo['idade_empresa']) plt.hist(df_modelo["dispersao"]) dfout.to_excel("modelo_outlier2.xlsx") dt = df.reset_index() dt.head() dt.rename(columns={"dispersao_dividas" : "dispersao_divida"}, inplace=True) # salva base de outliers engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys..amazonaws.com:23306/varejo") con = engine.connect() dt.to_sql("outlier_detection", schema="varejo", con=con, if_exists='append', index=False) con.close() from sqlalchemy import create_engine # + # outliers visualization using PCA # - from sklearn.preprocessing import StandardScaler X.head() features = list(X.columns) x = X.loc[:, features].values x x = StandardScaler().fit_transform(x) x y = df.loc[:, ["outlier"]].values df_pca = X.copy() df_pca.iloc[:, :] = x # + # df_pca["outlier"] = y # - df_pca.head() # + pca = PCA(n_components=2) principalComponents = pca.fit_transform(df_pca) df_pca = pd.DataFrame(data = principalComponents , columns = ['pc1', 'pc2']) df_pca["outlier"] = outliers_predicted fig = plt.figure(figsize = (8,8)) ax = fig.add_subplot(1,1,1) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) targets = ["outlier", "inlier"] colors = ['r', 'b'] for target, color in zip(targets,colors): indicesToKeep = df_pca['outlier'] == -1 if target == "outlier" else df_pca['outlier'] == 1 ax.scatter(df_pca.loc[indicesToKeep, 'pc1'] , df_pca.loc[indicesToKeep, 'pc2'] , c = color , s = 50) ax.legend(targets) ax.grid() # - df.head() df_out = df[df['outlier']==-1] df_out = df_out.reset_index() df_out.sort_values("idade").head(10) df.sort_values('num_ocorr_infra', ascending=False) df.sort_values("num_ocorr_proc", ascending=False).head() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Decoradores ¿Qué son? # ### Funciones que añaden funcionalidades a otras funciones. # # Por eso se las denomina "decoradores", porque "decoran" a otras funciones. Les añaden funcionalidades. # # ### Estructura de un decorador: # # - 3 funciones (A, B, C) donde A recibe como parámetro a B para devolver C. # - Un decorador devuelve una función # ``` # def función_decorador(función): # def función_interna(): # # código de la función interna # return función_interna # ``` # Ejemplo base # + def suma(): print(15+20) def resta(): print(30-10) suma() resta() # + def suma(): print('vamos a realizar un cálculo') print(15+20) print('hemos terminado el cálculo') def resta(): print('vamos a realizar un cálculo') print(30-10) print('hemos terminado el cálculo') suma() resta() # - # Lo mismo pero empleando un decorador # Ahora se mejorara, pensar que pueden ser 100, lo que se requiere es añadir una nueva funcionalidad a todas las funciones. Sol.: Agregar una función decoradora y aplicar a la que se quiere. # # Nececidad: Imprimir un mensaje antes y después de realizar el cálculo. 
# + def funcion_decoradora(funcion_parametro): def funcion_interior(): print("vamos a realizar un cálculo") funcion_parametro() print("hemos terminado el cálculo") return funcion_interior @funcion_decoradora def suma(): print(15+20) @funcion_decoradora def resta(): print(30-10) suma() resta() # - # ### Decoradores con parámetros # El ejemplo anterior sólo se limita a imprimir mensajes; ahora se implementara para que las funciones reciban parámetros. # + def funcion_decoradora(funcion_parametro): def funcion_interior(*args): print("vamos a realizar un cálculo") funcion_parametro(*args) print("hemos terminado el cálculo") return funcion_interior @funcion_decoradora def suma(num1, num2): print(num1+num2) @funcion_decoradora def resta(num1, num2): print(num1-num2) suma(2,3) resta(3,2) # - # #### Ahora utilizando parámetros keyword # + def funcion_decoradora(funcion_parametro): def funcion_interior(*args, **kwords): print("vamos a realizar un cálculo") funcion_parametro(*args, **kwords) print("hemos terminado el cálculo") return funcion_interior @funcion_decoradora def suma(num1, num2): print(num1+num2) @funcion_decoradora def resta(num1, num2): print(num1-num2) @funcion_decoradora def potencia(base, exponente): print(pow(base, exponente)) suma(2,3) resta(3,2) potencia(base=2, exponente=3) # - # Y ahora logrando que las funciones devuelvan resultados... # + def funcion_decoradora(funcion_parametro): def funcion_interior(*args, **kwords): print("vamos a realizar un cálculo") salida = funcion_parametro(*args, **kwords) print("hemos terminado el cálculo") return salida return funcion_interior @funcion_decoradora def suma(num1, num2): return num1+num2 @funcion_decoradora def resta(num1, num2): return num1-num2 @funcion_decoradora def potencia(base, exponente): return pow(base, exponente) print(suma(2,3)) print(resta(3,2)) print(potencia(base=2, exponente=3)) # - # #### Agregando argumentos al decorador # + def funcion_decoradora(is_valid = True): def _funcion_decoradora(funcion_parametro): def before_action(): print("vamos a realizar un cálculo") def after_action(): print("hemos terminado el cálculo") def funcion_interior(*args, **kwords): if is_valid: before_action() salida = funcion_parametro(*args, **kwords) after_action() return salida return funcion_interior return _funcion_decoradora @funcion_decoradora(is_valid=False) def suma(num1, num2): return num1+num2 @funcion_decoradora() def resta(num1, num2): return num1-num2 @funcion_decoradora() def potencia(base, exponente): return pow(base, exponente) print(suma(2,3)) print(resta(3,2)) print(potencia(base=2, exponente=3)) # - # ### Otros ejemplos # + def todo_mayuscula(f): def mayuscula(): return f().upper() return mayuscula @todo_mayuscula def holamundo(): return 'Hola, mundo!' 
hola = holamundo() print(hola) # - # #### Etiquetas html # + def tag_html(func): def interna(*args, **kwargs): return '' + func(*args, **kwargs) + '' return interna @tag_html def hola(): return "Dentro de función" print(hola()) # - # #### Validando parámetros # Validación de enteros # + from functools import wraps def validate_type(type): def validate(func): @wraps(func) def inner(*args): if all(isinstance(val, type) for val in args): return func(*args) return inner return validate @validate_type(int) def suma(x, y): return x + y print(suma(1, 1)) print(suma.__name__) # - # Validación con parámetros # + def validate_type(type): def validate(func): def inner(*args, **kwargs): if all(isinstance(val, type) for val in args): return func(*args) return inner return validate @validate_type(str) def suma_entera(x, y): return x + y print(suma_entera('a', 'a')) # - # Logs # # + def log(func): def inner(*args): print(func.__name__, f'args:{args}') return func(*args) return inner @log def suma(x, y): return x + y print(suma(2, 3)) # - # Uso de esto en flask http://flask.palletsprojects.com/en/1.1.x/quickstart/#quickstart # Obteniendo nombres de funciones # + from functools import wraps def validate_type(type): def validate(func): @wraps(func) def inner(*args, **kwargs): if all(isinstance(val, type) for val in args): return func(*args) return inner return validate @validate_type(int) def suma_entera(x, y): return x + y print(suma_entera.__name__) print(suma_entera) # + from functools import lru_cache @lru_cache() def fib(n): #print(n) if n < 2: return n return fib(n-2) + fib(n-1) print(fib(6)) # - # Otra función importante... # + from functools import singledispatch from collections.abc import MutableSequence @singledispatch def generar_html(x): return x @generar_html.register(int) def _generar_con_int(x): return f'{x}' @generar_html.register(MutableSequence) def _generar_con_list(x): return f'

<ul><li>{x}</li></ul>

' @generar_html.register(str) def _generar_con_str(x): return f'{x} ' generar_html('Hola') # - # #### Controlando el tiempo de ejecución # + def calc_square(numbers): result = [] for number in numbers: result.append(number*number) return result def calc_cube(numbers): result = [] for number in numbers: result.append(number*number*number) return result array = range(1, 100000) out_square = calc_square(array) out_cube = calc_cube(array) #print(out_cube) # - # Requerimiento: Medir los tiempos que le toma a cada función terminar de procesar la lista de números. Reto: Reconocer que se puede mejorar en el código. # + import time def calc_square(numbers): start = time.time() result = [] for number in numbers: result.append(number*number) end = time.time() print('calc_square took ' + str((end-start)*1000) + ' mil sec') return result def calc_cube(numbers): start = time.time() result = [] for number in numbers: result.append(number*number*number) end = time.time() print('calc_cube took ' + str((end-start)*1000) + ' mil sec') return result array = range(1, 100000) out_square = calc_square(array) out_cube = calc_cube(array) #print(out_cube) # - # Las funciones son objetos de primera clase en python. Lo que significa es que pueden tratarse como cualquier otra variable y puede pasarlos como argumento a otra función o incluso devolverlos como un valor de retorno. # + jupyter={"outputs_hidden": true} import time def time_it(func): def wrapper(*args, **kwargs): start = time.time() result = func(*args, **kwargs) end = time.time() print(func.__name__ + " took " + str((end-start)*1000) + " mil sec") return result return wrapper @time_it def calc_square(numbers): result = [] for number in numbers: result.append(number*number) return result @time_it def calc_cube(numbers): result = [] for number in numbers: result.append(number*number*number) return result array = range(1, 100000) out_square = calc_square(array) out_cube = calc_cube(array) # - # ### Decorador para medir el tiempo de ejecución y escribir logs # + from functools import wraps import time def my_logger(orig_func): import logging logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO) @wraps(orig_func) def wrapper(*args, **kwargs): logging.info(f'Ran with args: {args}, and kwargs {kwargs}') return orig_func(*args, **kwargs) return wrapper def my_timer(orig_func): @wraps(orig_func) def wrapper(*args, **kwargs): t1 = time.time() result = orig_func(*args, **kwargs) t2 = time.time() - t1 print(f'{orig_func.__name__} ran in: {t2} sec.') return result return wrapper @my_logger @my_timer def display_info(name, age): time.sleep(1) #print(f'display_info ran with arguments ({name}, {age})') display_info('María', 23) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Building offline iPhone spam classifier using coreml # # iOS 11 introduced message extension to filter spam messages and coreml to build custom machine learned models to predict spam or not. In this article I’ll go over all the steps I took to build a machine learning model and how I added it to an iphone project. 
# # ## Load Modules # %matplotlib inline from io import open import matplotlib.pyplot as plt import csv from textblob import TextBlob import pandas import sklearn import pickle import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC, LinearSVC from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.cross_validation import StratifiedKFold, cross_val_score, train_test_split #from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.learning_curve import learning_curve from nltk.stem.snowball import SnowballStemmer from nltk.stem.porter import PorterStemmer from nltk.tokenize import word_tokenize import string plt.style.use('ggplot') pd.options.display.mpl_style = 'default' # # Data # # SMSSpamCollection lets load and understand the data. # # + file_name = 'SMSSpamCollection' with open(file_name) as f: corpus = f.readlines() print("there are {} messages".format(len(corpus))) corpus = [x.strip() for x in corpus] for i,message in enumerate(corpus[:10]): print(i, message) # - # There spam and ham messages are separated by tab. we can use a csv loader to read. # + with open('SMSSpamCollection',encoding='UTF-8') as f: messages = pd.read_csv(f, sep='\t', quoting=csv.QUOTE_NONE, names=['label', 'message']) messages['message'] = messages['message'].map(lambda text:text.decode(encoding='utf-8')) messages.head() # - messages.groupby('label').describe() messages['length'] = messages['message'].map(lambda text:len(text)) messages.head() messages.length.plot(bins=20, kind='hist') messages.length.describe() messages.hist(column='length', by='label', bins=50) # # # Processing Data # # we need to break the sentences into tokens and stem before we can use it. lets create a method for tokenizing the words. # + print("removing punctuations: "+string.punctuation) stemmer = PorterStemmer() def tokenize(message): """ removes punctuation and tokenizes the words and stems each word. """ msg = "".join([ch for ch in message if ch not in string.punctuation]) # get rid of punctuations tokens = word_tokenize(msg) #stems = [stemmer.stem(x).lower() for x in tokens] #correct way to do stems = [x.lower() for x in tokens] #iOS does not have porterstemmer, we are going to not use stem for now return stems messages.message.head().apply(tokenize) # - # # Feature Vector # # we need to convert the tokenized words into a vector to feed into the ml algorithm. we will create a TF-IDF feature vector from the sentence. # # 1. create count vector # 2. convert convert vector into tf-idf vector # # + fv = CountVectorizer(analyzer=tokenize).fit(messages.message) print("Total number of words in array", len(fv.vocabulary_)) # - # Lets try to test how different words are represented in the CountVector. # + print(fv.transform(["U dun"])) print("Second:") print(fv.transform(["dun U"])) # - # Notice sentences have same count vector generated. when we fit the vectorizer it creates a vocabulary. this vocabulary is used to create a count vector for all sentences. # # Lets try to vectorize a full sentence from our corpus. 
print(fv.transform([messages.message[3]]))

# ### Count Vector Vocabulary (words_array)
#
# The iPhone does not have a CountVectorizer, so we need to replicate this step ourselves if we are to analyze a sentence on the device. We will use the vocabulary of the CountVectorizer and save it into a words-array file.
#
# The words-array file holds the count-vector positions of the words and the frequency of their occurrence.
#
# This is in turn used to calculate the term frequency:
#
# tf = Ft / Count(F)
#
# Ft => frequency of term t in the current document
# Count(F) => total number of words in the corpus (max of Ft in the words array)
#
# Let's save this file.

import json
with open('words_array.json', 'wb') as fp:
    json.dump(fv.vocabulary_, fp)

# # Messages Feature Vector
#
# Let's compose the feature vector for our entire corpus.

messages_fv = fv.transform(messages.message)
print(messages_fv.shape)

# We will use a TF-IDF transformer to transform the count vector of the corpus into a TF-IDF vector.

# +
tfidf = TfidfTransformer().fit(messages_fv)

# test tfidf of same message as before.
t = tfidf.transform(fv.transform([messages.message[3]]))
print(t)
# -

# We see that the values are all normalized and weights are given according to TF-IDF importance. This gives the model more relevant features to work with.
#
# Let's compute the TF-IDF of the entire corpus.

messages_tfidf = tfidf.transform(messages_fv)
print(messages_tfidf.shape)

# ### IDF
#
# words_idf is a simple list of words and their IDF values:
#
# idf = log(N / Nt)
#
# N => number of documents
# Nt => number of documents containing term t
#
# We need to compute the TF-IDF of a sentence in iOS, and we will need the words_idf values as input for computing the vector. Let's save the IDF array into a file. This is later used in the iOS code.

# +
idf = {}
idf['idf'] = tfidf.idf_.tolist()
with open('words_idf.json', 'wb') as fp:
    json.dump(idf, fp)

print("IDF of corpus :", tfidf.idf_)
# -

# # Model Training
#
# For the model we will use a simple linear SVM. An SVM seems to give the most accurate results, and we can easily use this model on the iPhone as well. Let's create and train a Linear SVM model.

# +
# %time spam_detector = LinearSVC().fit(messages_tfidf, messages.label)

predictions = spam_detector.predict(messages_tfidf)
print('accuracy', accuracy_score(messages['label'], predictions))
print('confusion matrix\n', confusion_matrix(messages['label'], predictions))
print('(row=expected, col=predicted)')
# -

# It looks like the model has got really good accuracy, and the confusion matrix is also showing great results.
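# Note, however, that this accuracy is measured on the same messages the model was trained on, so it is optimistic. Below is a minimal sketch of a held-out check, reusing the train_test_split, LinearSVC and accuracy_score imported above (the variable names here are illustrative only):

# +
# Split the TF-IDF matrix and labels, keep 20% aside for evaluation.
tfidf_train, tfidf_val, label_train, label_val = train_test_split(
    messages_tfidf, messages['label'], test_size=0.2, random_state=42)

# Fit on the training portion only and score on the held-out portion.
heldout_detector = LinearSVC().fit(tfidf_train, label_train)
print('held-out accuracy', accuracy_score(label_val, heldout_detector.predict(tfidf_val)))
# -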
# # Lets plot this and view this a little nicely plt.matshow(confusion_matrix(messages['label'], predictions), cmap=plt.cm.binary, interpolation='nearest') plt.title('confusion matrix') plt.colorbar() plt.ylabel('expected label') plt.xlabel('predicted label') print(classification_report(messages['label'], predictions)) # # Convert to CoreML # + import coremltools coreml_model = coremltools.converters.sklearn.convert(spam_detector, "message", "spam_or_not") #set parameters of the model coreml_model.short_description = "Classify whether message is spam or not" coreml_model.input_description["message"] = "TFIDF of message to be classified" coreml_model.output_description["spam_or_not"] = "Whether message is spam or not" #save the model coreml_model.save("SpamMessageClassifier.mlmodel") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: amg # language: python # name: amg # --- # # Generate a STAC Catalog # # For better or worse, it is easier to build a STAC catalog at the same time that the STAC items are created. Below we generate a catalog and metadata for a directory of data # --------------------- # ## Step I: Make the catalog # + import datetime import json import os from pathlib import Path import ssl # This is require for verification / validation using remote resources when inside the network ssl._create_default_https_context = ssl._create_unverified_context import pystac from pystac import Collection, SpatialExtent, TemporalExtent, Extent from amg.isismetadata import IsisMetadata from amg.fgdcmetadata import FGDCMetadata, EquirectangularFgdcParser, PolarStereoGraphicFgdcParser from amg.gdalmetadata import GDALMetadata from amg.formatters.stac_formatter import to_stac from amg.formatters.fgdc_formatter import to_fgdc from amg import UnifiedMetadata # + description = """ The Solid State Imager (SSI) on NASA's Galileo spacecraft acquired more than 500 images of Jupiter's moon, Europa, providing the only moderate- to high-resolution images of the moon's surface. Images were acquired as observation sequences during each orbit that targeted the moon. Each of these observation sequences consists of between 1 and 19 images acquired close in time, that typically overlap, have consistent illumination and similar pixel scale. The observations vary from relatively low-resolution hemispherical imaging, to high-resolution targeted images that cover a small portion of the surface. Here we provide average mosaics of each of the individual observation sequences acquired by the Galileo spacecraft. These observation mosaics were constructed from a set of 481 Galileo images that were photogrammetrically controlled globally (along with 221 Voyager 1 and 2 images) to improve their relative locations on Europa's surface. The 92 observation mosaics provide users with nearly the entire Galileo Europa imaging dataset at its native resolution and with improved relative image locations. The Solid State Imager (SSI) on NASA's Galileo spacecraft provided the only moderate- to high-resolution images of Jupiter's moon, Europa. Unfortunately, uncertainty in the position and pointing of the spacecraft, as well as the position and orientation of Europa, when the images were acquired resulted in significant errors in image locations on the surface. 
The result of these errors is that images acquired during different Galileo orbits, or even at different times during the same orbit, are significantly misaligned (errors of up to 100 km on the surface). Previous work has generated global mosaics of Galileo and Voyager images that photogrammetrically control a subset of the available images to correct their relative locations. However, these efforts result in a "static" mosaic that is projected to a consistent pixel scale, and only use a fraction of the dataset (e.g., high resolution images are not included). The purpose of this current dataset is to increase the usability of the entire Galileo image set by photogrammetrically improving the locations of nearly every Europa image acquired by Galileo, and making them available to the community at their native resolution and in easy-to-use regional mosaics based on their acquisition time. The dataset therefore provides a set of image mosaics that can be used for scientific analysis and mission planning activities. """ coll = Collection(id='usgs_controlled_voy1_voy2_galileo', title='USGS Controlled Europa Voyager 1, Voyager 2, and Galileo Image Data', description=description, extent=Extent(SpatialExtent([-180, -90, 180, 90]), TemporalExtent([datetime.datetime(2021, 1, 1), None])), href='https://asc-jupiter.s3-us-west-2.amazonaws.com/europa/individual_l2/collection.json', license='PDDL-1.0' ) coll.validate() # - # ---------------- # ## Step II: Get a list of the input data # # Below, we are generating the catalog from a list of files that contains full, qualified paths. One could also use glob to generate a file list dynamically from within a notebook. # # The `UPLOAD_DIR` argument defines where we are going to write out the collection and any metadata files. In practice, the workflow that I have been using is: # # 1. Generate the cloud optimized geotiffs (COGs) and stage them into the UPLOAD_DIR # 1. Generate the metadata and collection files, pointing at the original data, and stage them into the UPLOAD_DIR # 1. Push all of the data to S3 # 1. Scrape the new S3 bucket using a local stac-browser # 1. Push the updated stac-browser (it is a static site after all) to the web hosting S3 bucket # + UPLOAD_DIR = '/scratch/ARD/stac/jupiter/europa/' # List the products to generate STAC for... with open('/archive/projects/europa/GLL_FinProducts/observation_lev2_products.lis', 'r') as f: products = f.readlines() products = [Path(p.rstrip()) for p in products] # - # The first few entries from the above list file are printed as a sanity check. print(products[0:2]) # ----------------------- # ## Step III: Cook Metadata and Update the Catalog # Now it is necessary to loop over the individual files and generate appropriate metadata. Before doing that, three items are defined: # - the template to use / parse for metadata # - the overrides # - the mappings # # Checkout the GenererateIndividualMetadata notebook for a full description of these arguments # + FGDC_TEMPLATE = '../templates/europa_individual_l2_fgdc.xml' # Define overrides overrides = {'license': 'PDDL-1.0', 'missions':['Voyager 1', 'Voyager 2', 'Galileo'], 'doi':'https://doi.org/10.5066/P9VKKK7C', 'href':'https://asc-jupiter.s3-us-west-2.amazonaws.com/europa/individual_l2'} # Define mappings mappings = {'bbox':IsisMetadata, } # - # STAC also has a concept of assets or files that are closely associated with one another. For each data set, it is necessary to define the assets template. 
The code will dynamically populate entries in the list of assets by filling in variables that are indicated by `{}`. For example, in the assets below the title of the first asset reads `'JPEG thumbnail of image {productid}'`. The code parses this string and replaces `{productid}` with the `productid` that is parsed off of the `UnifiedMetadata` object. assets = [{'title':'JPEG thumbnail of image {productid}', 'href':'{href}/{productid}.jpeg', 'media_type':'image/jpeg', 'roles':['thumbnail'], 'key':'thumbnail'}, {'title': 'Cloud optimized GeoTiff for image {productid}', 'href':'{href}/{productid}-cog.tif', 'media_type':'image/tiff; application=geotiff; profile=cloud-optimized', 'roles':['data'], 'key':'B1'}, {'title': 'GDAL PAM Metadata for image {productid}', 'href':'{href}/{productid}-cog.tif.aux.xml', 'media_type':'application/xml', 'roles':['metadata'], 'key':'gdal_metadata'}, {'title': 'FGDC Metadata for image {productid}', 'href':'{href}/{productid}.xml', 'media_type':'application/xml', 'roles':['metadata'], 'key':'fgdc_metadata'}] # The cell below, in this example, is going to run for a fair amount of time simply because lots of metadata files are being generated. # + for f in products: # Perform some mundging on the path to get the base product name without path or file extension f = str(f) basename = os.path.basename(f) outname = os.path.splitext(basename)[0] # Parse the filename (in this case) to get the projection so the correct FGDC projection injection can occurr if 'equi' in f: proj='equirect' elif 'npola' in f or 'spola' in f: proj='polarst' # Create the unified metadata record fgdc = FGDCMetadata(FGDC_TEMPLATE, proj=proj) gd = GDALMetadata(f) imd = IsisMetadata(f) record = UnifiedMetadata([fgdc, gd, imd], overrides=overrides, mappings={'bbox':IsisMetadata, }) # Generate the FGDC metadata fgdc_md = to_fgdc(record) with open(f'{UPLOAD_DIR}/{outname}.xml', 'w') as f: f.write(fgdc_md) # Convert the generic metadata record into a STAC formatted metadata record as_stac = to_stac(record, assets=assets) as_stac.validate() # Add the item to the parent collection. This also adds the collection to the item (win-win) coll.add_item(as_stac) # Write the STAC metadata with open(f'{UPLOAD_DIR}/{outname}.json', 'w') as f: json.dump( as_stac.to_dict(), f, indent=2) # Now write the collection coll.validate() with open(f'{UPLOAD_DIR}/collection.json', 'w') as f: json.dump(coll.to_dict(), f) # - # --------------------------- # ## Step IV: What about using glob? # # It is also sometimes desirable to not have to generate a file list before hand. It is possible to use the glob module to generate a listing similar to the one above. 
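# For instance, a minimal sketch of such a listing (the directory and the .cub extension below are illustrative, not the actual location or format of the mosaics):

# +
import glob

# Build the product list dynamically instead of reading a pre-made .lis file.
products = [Path(p) for p in sorted(glob.glob('/scratch/ARD/stac/jupiter/europa/mosaics/*.cub'))]
print(f'Found {len(products)} products')
# -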
The cells below: # - Create a new collection for Europa mosaics # - Glob a local directory full of said mosaics # - Generate stac and fgdc metadata for the image mosaics # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import numpy as np u = np.array([1.,2.,3.]) v = np.array([4.,5.,6.]) print u*v print np.dot(u, v) print np.outer(u, v) C = np.random.randn(5, 5) print np.linalg.det(C) print np.trace(C) w, v = np.linalg.eig(np.diag((1, 2, 3))) print v, u # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="SymssfV7IcQ7" # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # + [markdown] id="MCjxEZg_IfJo" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/11.Pretrained_Clinical_Pipelines.ipynb) # + [markdown] id="v4uN8ZP_Itdo" # # 11. Pretrained_Clinical_Pipelines # + [markdown] id="YcrO6BlfIwEV" # ## Colab Setup # + id="sDxBKvHjIc5B" import json from google.colab import files license_keys = files.upload() with open(list(license_keys.keys())[0]) as f: license_keys = json.load(f) # + id="R6iawCF1hDtv" executionInfo={"status": "ok", "timestamp": 1617889661677, "user_tz": -180, "elapsed": 125500, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} # %%capture for k,v in license_keys.items(): # %set_env $k=$v # !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh # !bash jsl_colab_setup.sh -p 2.4.4 # + id="xzNgN33jNHTe" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1617889681495, "user_tz": -180, "elapsed": 142327, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="ff464671-6656-4639-eff8-468dd1585056" import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl import sparknlp params = {"spark.driver.memory":"16G", "spark.kryoserializer.buffer.max":"2000M", "spark.driver.maxResultSize":"2000M"} spark = sparknlp_jsl.start(license_keys['SECRET'],params=params) print ("Spark NLP Version :", sparknlp.version()) print ("Spark NLP_JSL Version :", sparknlp_jsl.version()) # + [markdown] id="9CzSCbXSKLSC" # # if you want to work with Spark 2.3 # ``` # import os # # # Install java # # # ! apt-get update -qq # # # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null # # # # !wget -q https://archive.apache.org/dist/spark/spark-2.3.0/spark-2.3.0-bin-hadoop2.7.tgz # # # # !tar xf spark-2.3.0-bin-hadoop2.7.tgz # # # !pip install -q findspark # # os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" # os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] # os.environ["SPARK_HOME"] = "/content/spark-2.3.0-bin-hadoop2.7" # # # ! java -version # # import findspark # findspark.init() # from pyspark.sql import SparkSession # # # # ! 
pip install --ignore-installed -q spark-nlp==2.7.5 # import sparknlp # # spark = sparknlp.start(spark23=True) # ``` # + [markdown] id="hmhC6kMHKYyh" # ## Pretrained Pipelines # + [markdown] id="dLFDvuupKeGH" # In order to save you from creating a pipeline from scratch, Spark NLP also has a pre-trained pipelines that are already fitted using certain annotators and transformers according to various use cases. # # Here is the list of clinical pre-trained pipelines: # # > These clinical pipelines are trained with `embeddings_healthcare_100d` and accuracies might be 1-2% lower than `embeddings_clinical` which is 200d. # # **1. explain_clinical_doc_carp** : # # > a pipeline with `ner_clinical`, `assertion_dl`, `re_clinical` and `ner_posology`. It will extract clinical and medication entities, assign assertion status and find relationships between clinical entities. # # **2. explain_clinical_doc_era** : # # > a pipeline with `ner_clinical_events`, `assertion_dl` and `re_temporal_events_clinical`. It will extract clinical entities, assign assertion status and find temporal relationships between clinical entities. # # **3. recognize_entities_posology** : # # > a pipeline with `ner_posology`. It will only extract medication entities. # # # ** Since 3rd pipeline is already a subset of 1st and 2nd pipeline, we will only cover the first two pipelines in this notebook. # # **4. explain_clinical_doc_ade** : # # > a pipeline for `Adverse Drug Events (ADE)` with `ner_ade_biobert`, `assertiondl_biobert` and `classifierdl_ade_conversational_biobert`. It will extract `ADE` and `DRUG` clinical entities, assign assertion status to `ADE` entities, and then assign ADE status to a text (`True` means ADE, `False` means not related to ADE). # # **letter codes in the naming conventions:** # # > c : ner_clinical # # > e : ner_clinical_events # # > r : relation extraction # # > p : ner_posology # # > a : assertion # # > ade : adverse drug events # # **Relation Extraction types:** # # `re_clinical` >> TrIP (improved), TrWP (worsened), TrCP (caused problem), TrAP (administered), TrNAP (avoided), TeRP (revealed problem), TeCP (investigate problem), PIP (problems related) # # `re_temporal_events_clinical` >> `AFTER`, `BEFORE`, `OVERLAP` # # + [markdown] id="pK2Tt0ZuRy2B" # ## 1.explain_clinical_doc_carp # # a pipeline with ner_clinical, assertion_dl, re_clinical and ner_posology. It will extract clinical and medication entities, assign assertion status and find relationships between clinical entities. 
# + id="Y1di9iuJMiXl" from sparknlp.pretrained import PretrainedPipeline # + colab={"base_uri": "https://localhost:8080/"} id="4rg3oe37R1wX" executionInfo={"status": "ok", "timestamp": 1617830337044, "user_tz": -180, "elapsed": 106518, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="75402fc0-f794-4d93-b698-fdf796922a51" pipeline = PretrainedPipeline('explain_clinical_doc_carp', 'en', 'clinical/models') # + colab={"base_uri": "https://localhost:8080/"} id="V7OaKtxjvA7f" executionInfo={"status": "ok", "timestamp": 1617830341328, "user_tz": -180, "elapsed": 872, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="76dd44d8-9ac4-4c66-9182-e0f773664b1e" pipeline.model.stages # + id="d9eRK3IKUurq" # Load pretrained pipeline from local disk: # >> pipeline_local = PretrainedPipeline.from_disk('/root/cache_pretrained/explain_clinical_doc_carp_en_2.5.5_2.4_1597841630062') # + colab={"base_uri": "https://localhost:8080/"} id="3xmhwW2MVKYa" executionInfo={"status": "ok", "timestamp": 1617830357065, "user_tz": -180, "elapsed": 3540, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="1ade5f87-e409-49fd-c1a0-08854c89d1a4" text ="""A 28-year-old female with a history of gestational diabetes mellitus, used to take metformin 1000 mg two times a day, presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . She was seen by the endocrinology service and discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals. """ annotations = pipeline.annotate(text) annotations.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 669} id="X4z-RbyZVvgR" executionInfo={"status": "ok", "timestamp": 1617830360621, "user_tz": -180, "elapsed": 1140, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="09b709c2-127b-445b-bc45-f31def259e41" import pandas as pd rows = list(zip(annotations['tokens'], annotations['clinical_ner_tags'], annotations['ner_tags'], annotations['pos_tags'], annotations['dependencies'])) df = pd.DataFrame(rows, columns = ['tokens','clinical_ner_tags','posology_ner_tags','POS_tags','dependencies']) df.head(20) # + colab={"base_uri": "https://localhost:8080/", "height": 173} id="5Hlwc2M2Xl_y" executionInfo={"status": "ok", "timestamp": 1617830405175, "user_tz": -180, "elapsed": 3063, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="dc165a34-4cef-49b6-c3fe-75d64145052f" text = 'Patient has a headache for the last 2 weeks and appears anxious when she walks fast. No alopecia noted. She denies pain' result = pipeline.fullAnnotate(text)[0] chunks=[] entities=[] status=[] for n,m in zip(result['clinical_ner_chunks'],result['assertion']): chunks.append(n.result) entities.append(n.metadata['entity']) status.append(m.result) df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status}) df # + colab={"base_uri": "https://localhost:8080/", "height": 514} id="wF9PCC18Y7eE" executionInfo={"status": "ok", "timestamp": 1617830413797, "user_tz": -180, "elapsed": 1432, "user": {"displayName": "", "photoUrl": "", "userId": "01037434825541536598"}} outputId="ceb2c8a6-abfe-49fa-89f1-8dfd737e74ef" text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. 
The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. """ result = pipeline.fullAnnotate(text)[0] chunks=[] entities=[] begins=[] ends=[] for n in result['ner_chunks']: chunks.append(n.result) begins.append(n.begin) ends.append(n.end) entities.append(n.metadata['entity']) df = pd.DataFrame({'chunks':chunks, 'begin':begins, 'end':ends, 'entities':entities}) df # + id="oFLWugWaViPU" import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) rel_df.confidence = rel_df.confidence.astype(float) return rel_df # + colab={"base_uri": "https://localhost:8080/", "height": 371} id="lRkeOb4RZwJS" executionInfo={"status": "ok", "timestamp": 1617830553422, "user_tz": -180, "elapsed": 1923, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="5c2c00eb-fff4-4042-f1b8-9b6f0e2258d8" text ="""A 28-year-old female with a history of gestational diabetes mellitus, used to take metformin 1000 mg two times a day, presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . She was seen by the endocrinology service and discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals. """ annotations = pipeline.fullAnnotate(text) rel_df = get_relations_df (annotations, 'clinical_relations') rel_df # + colab={"base_uri": "https://localhost:8080/", "height": 566} id="IlFpX8Qm8zNw" executionInfo={"status": "ok", "timestamp": 1617830589273, "user_tz": -180, "elapsed": 2362, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="ae0a2ab9-92d2-4741-bba7-7b48313b2722" text =""" he patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . 
""" annotations = pipeline.fullAnnotate(text) rel_df = get_relations_df (annotations, 'clinical_relations') rel_df[rel_df.confidence>0.9] # + [markdown] id="ljp4EmCxaDNd" # ## **2. explain_clinical_doc_era** # # > a pipeline with `ner_clinical_events`, `assertion_dl` and `re_temporal_events_clinical`. It will extract clinical entities, assign assertion status and find temporal relationships between clinical entities. # # # + colab={"base_uri": "https://localhost:8080/"} id="BMWEfu1QuXY5" executionInfo={"status": "ok", "timestamp": 1617830690474, "user_tz": -180, "elapsed": 87788, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="8b3e60d3-b3b1-43ed-b184-055f0d613bd5" era_pipeline = PretrainedPipeline('explain_clinical_doc_era', 'en', 'clinical/models') # + colab={"base_uri": "https://localhost:8080/"} id="xWhFRqvpvIOl" executionInfo={"status": "ok", "timestamp": 1617830720851, "user_tz": -180, "elapsed": 912, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="adb55807-d576-48cb-863b-c9b3004f9e19" era_pipeline.model.stages # + id="O9cTLFNUaQNU" text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She denied pain and any headache. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ result = era_pipeline.fullAnnotate(text)[0] # + colab={"base_uri": "https://localhost:8080/"} id="1igwWvsgu7z6" executionInfo={"status": "ok", "timestamp": 1617831121605, "user_tz": -180, "elapsed": 752, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="8de6f206-16f7-4f8d-c2a2-69ac864db605" result.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 638} id="6rejwSLjyW8S" executionInfo={"status": "ok", "timestamp": 1617831273299, "user_tz": -180, "elapsed": 603, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="22243af6-a6c5-421a-a094-8fffe88b8155" import pandas as pd chunks=[] entities=[] begins=[] ends=[] for n in result['clinical_ner_chunks']: chunks.append(n.result) begins.append(n.begin) ends.append(n.end) entities.append(n.metadata['entity']) df = pd.DataFrame({'chunks':chunks, 'begin':begins, 'end':ends, 'entities':entities}) df # + colab={"base_uri": "https://localhost:8080/", "height": 545} id="2pKbjUxEv9z3" executionInfo={"status": "ok", "timestamp": 1617831289854, "user_tz": -180, "elapsed": 908, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="50662d67-df67-438e-d310-463a578c07f3" chunks=[] entities=[] status=[] for n,m in zip(result['clinical_ner_chunks'],result['assertion']): chunks.append(n.result) entities.append(n.metadata['entity']) status.append(m.result) df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status}) df # + colab={"base_uri": "https://localhost:8080/", "height": 700} id="FqN66c0Ou59s" executionInfo={"status": "ok", "timestamp": 1617831311091, "user_tz": -180, "elapsed": 1503, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="e95849ce-9d9f-40d9-ab9f-d9752709a14c" annotations = era_pipeline.fullAnnotate(text) rel_df = 
get_relations_df (annotations, 'clinical_relations') rel_df # + id="gqaVtu8xv3kT" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1617831405696, "user_tz": -180, "elapsed": 1294, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="18e0e194-d23f-4521-ab80-78bb0144242f" annotations[0]['clinical_relations'] # + [markdown] id="TFBWJmXlMue8" # ## 3.explain_clinical_doc_ade # # A pipeline for `Adverse Drug Events (ADE)` with `ner_ade_healthcare`, and `classifierdl_ade_biobert`. It will extract `ADE` and `DRUG` clinical entities, and then assign ADE status to a text(`Negative` means ADE, `Neutral` means not related to ADE). # + colab={"base_uri": "https://localhost:8080/"} id="oYaFInzXM0Gw" executionInfo={"status": "ok", "timestamp": 1617831478438, "user_tz": -180, "elapsed": 57991, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="aa224f11-2a16-4a62-efda-bc6ac7563d33" ade_pipeline = PretrainedPipeline('explain_clinical_doc_ade', 'en', 'clinical/models') # + colab={"base_uri": "https://localhost:8080/"} id="76WiwihdPf9T" executionInfo={"status": "ok", "timestamp": 1617831489711, "user_tz": -180, "elapsed": 3224, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="f860c0dc-ef61-4761-dfb4-bb1344be4b8c" ade_pipeline.fullAnnotate("I feel a bit drowsy & have a little blurred vision, so far no gastric problems.")[0]['class'][0].metadata # + colab={"base_uri": "https://localhost:8080/", "height": 292} id="9xHqsx4aOQQY" executionInfo={"status": "ok", "timestamp": 1617831518222, "user_tz": -180, "elapsed": 2359, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="8d279afa-bf9f-4dbb-a298-de4e6d09a522" text = "I feel a bit drowsy & have a little blurred vision, so far no gastric problems. I have been on Arthrotec 50 for over 10 years on and off, only taking it when I needed it. Due to my arthritis getting progressively worse, to the point where I am in tears with the agony, gp's started me on 75 twice a day and I have to take it every day for the next month to see how I get on, here goes. So far its been very good, pains almost gone, but I feel a bit weird, didn't have that when on 50." import pandas as pd chunks = [] entities = [] begin =[] end = [] print ('sentence:', text) print() result = ade_pipeline.fullAnnotate(text) print ('ADE status:', result[0]['class'][0].result) print ('prediction probability>> True : ', result[0]['class'][0].metadata['True'], \ 'False: ', result[0]['class'][0].metadata['False']) for n in result[0]['ner_chunk']: begin.append(n.begin) end.append(n.end) chunks.append(n.result) entities.append(n.metadata['entity']) df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'begin': begin, 'end': end}) df # + [markdown] id="u3ZaUbUkiF77" # #### with AssertionDL # + colab={"base_uri": "https://localhost:8080/", "height": 210} id="P9UcxSIBg9C-" executionInfo={"status": "ok", "timestamp": 1617831584382, "user_tz": -180, "elapsed": 2124, "user": {"displayName": "muhammet \u015fanta\u015f", "photoUrl": "", "userId": "01037434825541536598"}} outputId="8603b23a-548b-44a0-9f66-5afafb224500" import pandas as pd text = 'I was on Voltaren for about 4 years and all of the sudden had a minor stroke and had blood clots that traveled to my eye. 
I had every test in the book done at the hospital, and they couldnt find anything. I was completley healthy! I am thinking it was from the voltaren. I have been off of the drug for 8 months now, and have never felt better. I started eating healthy and working out and that has help alot. I can now sleep all thru the night. I wont take this again. If I have the back pain, I will pop a tylonol instead.' print (text) light_result = ade_pipeline.fullAnnotate(text)[0] chunks=[] entities=[] status=[] for n,m in zip(light_result['ass_ner_chunk'],light_result['assertion']): chunks.append(n.result) entities.append(n.metadata['entity']) status.append(m.result) df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status}) df # + [markdown] id="WJUJhRsDN0P4" # ## 4.ICD10CM to Snomed Code # # This pretrained pipeline maps ICD10CM codes to SNOMED codes without using any text data. You’ll just feed a comma or white space delimited ICD10CM codes and it will return the corresponding SNOMED codes as a list. For the time being, it supports 132K Snomed codes and will be augmented & enriched in the next releases. # + id="URlYyaQnPA1E" icd_snomed_pipeline = PretrainedPipeline("icd10cm_snomed_mapping", "en", "clinical/models") # + colab={"base_uri": "https://localhost:8080/"} id="jhqXGDTSTaEW" outputId="6af91f63-79c7-4965-8b5b-bedd09a6c73d" icd_snomed_pipeline.model.stages # + colab={"base_uri": "https://localhost:8080/"} id="pExgipi5O7vY" outputId="30105360-6471-418e-9ee4-fa39b9394017" icd_snomed_pipeline.annotate('M89.50 I288 H16269') # + [markdown] id="hrgvx5lTXwEt" # **ICD10CM ---------- Details** # # M89.50 ----------------- Osteolysis, unspecified site # I288 ---------------------- Other diseases of pulmonary vessels # H16269 ---------------- Vernal keratoconjunctivitis, with limbar and corneal involvement, unspecified eye # # **SNOMED ----------- Details** # # 733187009 ---------- Osteolysis following surgical procedure on skeletal system # 449433008 ---------- Diffuse stenosis of left pulmonary artery # 51264003 ------------ Limbal AND/OR corneal involvement in vernal conjunctivitis # + [markdown] id="kG1whe5KPVH5" # ## 5.Snomed to ICD10CM Code # This pretrained pipeline maps SNOMED codes to ICD10CM codes without using any text data. You'll just feed a comma or white space delimited SNOMED codes and it will return the corresponding candidate ICD10CM codes as a list (multiple ICD10 codes for each Snomed code). For the time being, it supports 132K Snomed codes and 30K ICD10 codes and will be augmented & enriched in the next releases. 
# + id="Gk5fU0J0Rzw0" snomed_icd_pipeline = PretrainedPipeline("snomed_icd10cm_mapping","en","clinical/models") # + colab={"base_uri": "https://localhost:8080/"} id="m85pQn8lSJ8l" outputId="b2793aae-8955-4b72-a272-c2e704121241" snomed_icd_pipeline.model.stages # + colab={"base_uri": "https://localhost:8080/"} id="AivemfSaRdjD" outputId="4d1206c5-9774-44ba-84b5-662a247b67d2" snomed_icd_pipeline.annotate('733187009 449433008 51264003') # + [markdown] id="W618yxZYbs5U" # # **SNOMED ----------- Details** # # 733187009 ---------- Osteolysis following surgical procedure on skeletal system # 449433008 ---------- Diffuse stenosis of left pulmonary artery # 51264003 ------------ Limbal AND/OR corneal involvement in vernal conjunctivitis # # # **ICDM10CM -------- Details** # # M89.59 ------------------ Osteolysis, multiple sites # M89.50 ------------------ Osteolysis, unspecified site # M96.89 ------------------ Other intraoperative and postprocedural complications and disorders of the musculoskeletal system # # Q25.6 --------------------- Stenosis of pulmonary artery # I28.8 ---------------------- Other diseases of pulmonary vessels # # H10.45 ------------------- Other chronic allergic conjunctivitis # H10.1 --------------------- Acute atopic conjunctivitis # H16.269 ----------------- Vernal keratoconjunctivitis, with limbar and corneal involvement, unspecified eye # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.6 64-bit # language: python # name: python38664bitdc5bcfc3f08f4be2983655d129be0c8b # --- # + # modules we'll use import pandas as pd import numpy as np # for Box-Cox Transformation from scipy import stats # for min_max scaling from mlxtend.preprocessing import minmax_scaling # plotting modules import seaborn as sns import matplotlib.pyplot as plt # read in all our data data= pd.read_csv("/home/hemanth/Documents/XGBoosting/ks-projects-201612.csv",encoding='ISO-8859-1') # set seed for reproducibility np.random.seed(0) # - data.head() data.shape # + # generate 1000 data points randomly drawn from an exponential distribution original_data = np.random.exponential(size = 1000) # mix-max scale the data between 0 and 1 scaled_data = minmax_scaling(original_data, columns = [0]) # plot both together to compare fig, ax=plt.subplots(1,2) sns.distplot(original_data, ax=ax[0]) ax[0].set_title("Original Data") sns.distplot(scaled_data, ax=ax[1]) ax[1].set_title("Scaled data") # + # normalize the exponential data with boxcox normalized_data = stats.boxcox(original_data) # plot both together to compare fig, ax=plt.subplots(1,2) sns.distplot(original_data, ax=ax[0]) ax[0].set_title("Original Data") sns.distplot(normalized_data[0], ax=ax[1]) ax[1].set_title("Normalized data") # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re from collections import Counter import warnings warnings.filterwarnings("ignore") def words(text): return re.findall(r'\w+', text.lower()) words ('EBook of The Adventures of Sherlock Holmes') dictionary = Counter(words(open('data/big.txt').read())) dictionary # Find the correction c, out of all possible candidate corrections, that maximizes the probability that c is the intended correction, given the original word w # # Bayes' Theorem # # **P(c|w) = P(c) P(w|c) / P(w)** # # 
- **c** - Correction # # # - **w** - Word # # # - **P(c)** - The probability that c appears as a word of English text.
For example, occurrences of "the" make up about 7% of English text, so we should have P(the) = 0.07 # # # - **P(w|c)** - The probability that w would be typed in a text when the author meant c.
For example, P(teh|the) is relatively high, but P(theeexyz|the) would be very low def P(word, N=sum(dictionary.values())): "Probability of `word`" return dictionary[word]/N def correction(word): "Most probable spelling correction for word" return max(candidates(word), key=P) # All known words of edit distance 1 are infinitely more probable than known words of edit distance 2, and infinitely less probable than a known word of edit distance 0 def candidates(word): "Generate possible spelling correction for words" return(known([word]) or known(edits1(word)) or \ known(edits2(word)) or [word]) # we don't need to multiply by a P(w|c) factor, because every candidate at the chosen priority will have the same probability def known(words): "The subset of words that appear in the dictionary" return set(w for w in words if w in dictionary) # **Sime Edit** - deletion (remove one letter), a transposition (swap two adjacent letters), a replacement (change one letter to another) or an insertion (add a letter) def edits1(word): "All edits that are one edit away from word" letters ='abcdefghijklmnopqrstuvwxyz' splits = [(word[:i], word[i:]) for i in range(len(word)+1)] deletes = [L + R[1:] for L,R in splits if R] transposes = [L + R[1] +R[0]+R[2:] for L, R in splits if len(R)>1] replaces = [L +c +R[1:] for L,R in splits if R for c in letters] inserts = [L+ c+R for L,R in splits for c in letters] return set(deletes + transposes +replaces +inserts) len(edits1('inention')) known(edits1('inention')) def edits2(word): "two edits away from word" return (e2 for e1 in edits1(word) for e2 in edits1(e1)) len(set(edits2('inention'))) candidates('acient') known(edits2('inention')) len(dictionary) sum(dictionary.values()) P('how') P('invention') dictionary.most_common(10) correction('speling') # Missing letter correction('korrectud') #Two letter correction correction('inetion') #Missing two letters ['1','2','3'] or ['4','5'] # ### Test def unit_tests(): assert correction('speling') == 'spelling' # insert assert correction('korrectud') == 'corrected' # replace 2 assert correction('bycycle') == 'bicycle' # replace assert correction('inconvient') == 'inconvenient' # insert 2 assert correction('arrainged') == 'arranged' # delete assert correction('peotry') =='poetry' # transpose assert correction('peotryy') =='poetry' # transpose + delete assert correction('word') == 'word' # known assert correction('quintessential') == 'quintessential' # unknown assert words('This is a TEST.') == ['this', 'is', 'a', 'test'] assert Counter(words('This is a test. 123; A TEST this is.')) == ( Counter({'123': 1, 'a': 2, 'is': 2, 'test': 2, 'this': 2})) assert len(dictionary) == 32198 assert sum(dictionary.values()) == 1115585 # assert dictionary.most_common(10) == [ # ('the', 79808), # ('of', 40024), # ('and', 38311), # ('to', 28765), # ('in', 22020), # ('a', 21124), # ('that', 12512), # ('he', 12401), # ('was', 11410), # ('it', 10681)] # assert dictionary['the'] == 79808 assert P('quintessential') == 0 assert 0.07 < P('the') < 0.08 return 'unit_tests pass' def spelltest(tests, verbose=False): "Run correction(wrong) on all (right, wrong) pairs; report results." 
import time start = time.clock() good, unknown = 0, 0 n = len(tests) for right, wrong in tests: w = correction(wrong) good += (w == right) if w != right: unknown += (right not in dictionary) if verbose: print('correction({}) => {} ({}); expected {} ({})' .format(wrong, w, dictionary[w], right, dictionary[right])) dt = time.clock() - start print('{:.0%} of {} correct ({:.0%} unknown) at {:.0f} words per second ' .format(good / n, n, unknown / n, n / dt)) def Testset(lines): "Parse 'right: wrong1 wrong2' lines into [('right', 'wrong1'), ('right', 'wrong2')] pairs." return [(right, wrong) for (right, wrongs) in (line.split(':') for line in lines) for wrong in wrongs.split()] print(unit_tests()) spelltest(Testset(open('data/spell-testset1.txt'))) # Development set spelltest(Testset(open('data/spell-testset2.txt'))) # Final test set // --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: spylon-kernel // language: scala // name: spylon-kernel // --- spark // + language="python" // from pyspark.ml.linalg import Vectors // import numpy as np // + language="python" // # Load MNIST // import tensorflow as tf // mnist = tf.keras.datasets.mnist // // (x_train, y_train),(x_test, y_test) = mnist.load_data() // x_train, x_test = x_train / 255.0, x_test / 255.0 // #print(x_train.shape, x_test.shape) // x_train=x_train.flatten().reshape(60000, 28*28) // x_test=x_test.flatten().reshape(10000, 28*28) // y_train=y_train.flatten().reshape(60000, 1) // y_test=y_test.flatten().reshape(10000, 1) // + language="python" // x_train.shape // + language="python" // y_x_tr = np.hstack([y_train, x_train]) // dff = map(lambda y_x: ( // int(y_x[0]), Vectors.dense(y_x[1:]) // ), y_x_tr // ) // // mnistdf = spark.createDataFrame(dff, schema=["label", "features"]).cache() // #mnistdf = spark.createDataFrame(, schema=["label", "features"]) // + language="python" // mnistdf.take(1) // + language="python" // print(x_train.shape) // print(y_train.shape) // x_y_tr = np.concatenate(x_train, y_train) // - // + language="python" // from sklearn.linear_model import LogisticRegression // clf = LogisticRegression() // clf.fit(x_train, y_train) // + language="python" // from sklearn import metrics // from sklearn.preprocessing import LabelBinarizer // enc = LabelBinarizer() // enc.fit(y_test) // pred = clf.predict_proba(x_test) // pred_cond = enc.inverse_transform(pred) // // #y_te_exp = enc.transform(y_test) // // print(pred[:1]) // print(metrics.accuracy_score(y_test, pred_cond)) // + language="python" // from sklearn. 
// - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from keras.datasets import cifar10 from sklearn.preprocessing import OneHotEncoder from tqdm import tqdm_notebook as tqdm from keras.layers import Layer, Input, Conv2D, MaxPool2D, Flatten, Dense, Reshape from keras.models import Model from src.gaussian_attention_layer import VisualAttentionLayer # ### Load data # + (x_train, y_train), (x_test, y_test) = cifar10.load_data() oh_enc = OneHotEncoder(n_values=10, sparse=False) y_train = oh_enc.fit_transform(y_train) y_test = oh_enc.transform(y_test) x_train = x_train/128.0-1.0 x_test = x_test/128.0-1.0 # - # ### Create Model # + inputs = Input(shape=(32,32,3)) x = Conv2D(64, kernel_size=(3,3), activation="relu")(inputs) x = MaxPool2D()(x) x = Conv2D(64, kernel_size=(3,3), activation="relu")(x) x = MaxPool2D()(x) x = Flatten()(x) x = Dense(6, activation="relu")(x) x = VisualAttentionLayer(output_dim=(20,20))([inputs, x]) x = Conv2D(64, kernel_size=(3,3), activation="relu")(x) x = MaxPool2D()(x) x = Conv2D(64, kernel_size=(3,3), activation="relu")(x) x = Flatten()(x) predictions = Dense(10, activation='softmax')(x) model = Model(inputs=inputs, outputs=predictions) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() # - # ### Train the Model history = model.fit(x_train, y_train, epochs=5, batch_size=32, verbose=2, validation_data=(x_test, y_test)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 from pathlib import Path from tqdm import tqdm from news_utils.clean import german # - num_texts_per_file = 100000 # 100 000 input_dir = '/mnt/data/group07/johannes/germanlm/proc/final/' output_dir = '/mnt/data/group07/johannes/germanlm/proc/final_merged/' def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] def merge_into_one(fns, i): with open(output_dir + str(i) + '.txt', 'w') as outfile: for fn in fns: text = Path(fn).read_text() text = german.clean_german(text) outfile.write(text + '\n') Path(output_dir).mkdir(exist_ok=True) for i, fns in tqdm(enumerate(chunks(list(Path(input_dir).glob('*.txt')), num_texts_per_file))): merge_into_one(fns, i) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### INTRODUCTION # # - Read to Data # - Data Visualization # - Data Preprocessing # - Building Model Pipelines # - Data Submission # # ##### Note # After series of test with different models for this task, CatBoost was the best performing model on the test data,so if this notebook must run, the cells where other models were tested should be ignored even though it wont throw up an error as the steps were all wrapped up in a pipeline. In addition, the winner notebook has a roc_auc score of 0.845... whereas this notebook got auc score of 0.843...,which is given by the catboost classifier. 
# ### Read to Data import pandas as pd train = pd.read_csv('./Train1.csv') test = pd.read_csv('./Test1.csv') train.head() test.head() train.info() train.describe() test.describe() # check for missing values train.isna().sum() test.isna().sum() # The above operation so far shows our data has alot of mising values, now we will explore each field to ascertain which column is trival to us # check the shape of our data print('Train Data Shape'+ str(train.shape)) print('=========================') print( 'Test Data Shape'+ str(test.shape)) # Our data shows we have two major categorical variable aside the ApplicantID which is set out for clients profiling, they are the target variable(default status) and form field47.Now lets take a look at the distribution of the target variable by using data visualization ## We explore percentage of the missing values in our train data total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum()/train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total,percent],axis=1,keys =['Total','Percent']) missing_data[missing_data.Total > 0] total = test.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum()/test.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total,percent],axis=1,keys =['Total','Percent']) missing_data[missing_data.Total > 0] # ### Data Visualization # ###### Exploring our categorical data for more insight. # import visualization libraries import matplotlib.pyplot as plt import numpy as np import matplotlib as pyplot import seaborn as sns sns.set_style('whitegrid') # %matplotlib inline # Exploring our traget variable print(train['default_status'].value_counts()) # creating a pictorial visual for the above info labels = 'Yes', 'No' sizes = [train.default_status[train['default_status']=='yes'].count(), train.default_status[train['default_status']=='no'].count()] explode = (0, 0.2) fig1, ax1 = plt.subplots(figsize=(15,8)) ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow = True, startangle=90) ax1.axis('equal') plt.title('Proportion of customers who defaulted ??') plt.show() # The above diagram suggested 24.5% of customers are loan defaulters whereas those who did not difault are with the percantage 75.5%. Now our major concern is to build a model that will correctly track these small percentage of customers who are most likely to default loan issued by the company. ## exploring the form field 47 train['form_field47'].unique() sns.countplot(x='form_field47',hue='default_status', data=train) plt.title('Chat on Distribution of default status based on Product Type') plt.show() # ###### observations: # - The above indicates clearly that between the customers who applied for charge and lending services, that customers who go for charge services type have greater chances of loan default # - On the other hand, it is also observed that there is a greater number of customers who applied for charge and did not default their loans,therefore indicating large number of customers on both classes. This could be as a result of the fact most of the customers prefer charge services. # ### Data Preprocessing # # ###### Exploring our continous variables for more insight # Now we explore our numeric variables but we need to take care of the missing values first. 
sns.heatmap(train.isnull()) plt.show() # The missing values in both our train and test data will be filled with their respective median,the reason i chose to fill with median instead of mean is because median is not affected by outliers so it would be wise to fill with it so our original data will be well intact without deformities train.fillna(train.median(), inplace=True) #train = train.fillna(-99) # We also fill for our test data test.fillna(test.median(), inplace=True) #test = test.fillna(-99) # verifying if our missing train data have been filled sns.heatmap(train.isnull()) plt.show() # verifying if our missing test data have been filled test.isna().sum() #now lets verify whether our train data still retains the original info train.describe() # As we can see our data is still retaining its original info after filling the missing values with their respective median,also our continous data is numeric in nature we will look at how each field is distributed for a better analysis. Now let us go ahead to explore them train.hist(bins=20,figsize=(12,16)) plt.show() # It appears most of these fields are not properly distributed,but we will look at how some of these fields interact with the target variable. # + fig, ax = plt.subplots(5,2, figsize=(20,20)) sns.boxplot(y='form_field1',x='default_status',hue='default_status', data=train, ax=ax[0][0]) sns.boxplot(y='form_field26', x='default_status',hue='default_status',data=train, ax=ax[0][1]) sns.boxplot(y='form_field27', x='default_status',hue='default_status', data=train, ax=ax[1][0]) sns.boxplot(y='form_field29',x='default_status',hue='default_status',data=train, ax=ax[1][1]) sns.boxplot(y='form_field42',x='default_status',hue='default_status',data=train, ax=ax[2][0]) sns.boxplot(y='form_field44',x='default_status',hue='default_status',data=train, ax=ax[2][1]) sns.boxplot(y='form_field25',x='default_status',hue='default_status',data=train, ax=ax[3][0]) sns.boxplot(y='form_field30',x='default_status',hue='default_status',data=train, ax=ax[3][1]) sns.boxplot(y='form_field43',x='default_status',hue='default_status',data=train, ax=ax[4][0]) sns.boxplot(y='form_field33',x='default_status',hue='default_status',data=train, ax=ax[4][1]) plt.show() # - # ###### observations: # - Based on credit worthiness for form_field1, there is a greater percentage of customers who appear to be credit worthy based on their historic data compared to the customers whom are not # - Unsurprisingly for form_field26(Tenure of active credit card). Customers with lower number of tenure of all active credit cards are loan defaulters. # - The above immediate assumption is also applicable for form_field27(Tenure of revolving active credit cards) # - Worringly for the organization a greater percentage of the customers default their loan compared to those who do not with respect to the financial stress index across all customers.This could be worrisome to the organisation as it would incure increasing cost of fund to borrowers as majority of the customers'financial stress index is way below average. # - There is no much significant difference in the ratio of the maximum amount due on all active credit lines and the sum of the amounts due on all active credit lines with customers who are likely to default their loan as in the case of form_field44. # ### Feature Engineering # Now we prepare our data features for our model building. 
First we start by taking care of our categorical features, I will be applying the One Hot encoding to transform the categorical features, which will help generating dummy variables that also have a correlation with the target variable. # + # apply one hot encoding to our train data #train = pd.get_dummies(train, columns=['form_field47']) # apply one hot encoding to our test data #test = pd.get_dummies(test, columns=['form_field47']) # - # Now we apply label encoder to transform our categorical train and test set from sklearn.preprocessing import LabelEncoder train['default_status'] = LabelEncoder().fit_transform(train.default_status) train['form_field47'] = LabelEncoder().fit_transform(train.form_field47) test['form_field47'] = LabelEncoder().fit_transform(test.form_field47) # feature selection , here we drop the ApplicantID for profiling X = train.drop(columns=['Applicant_ID','default_status'], axis=1) y = train.default_status #y = pd.factorize(train['default_status'])[0].reshape(-1) # We are going to be using only our train data for testing, we carve out a portion for training and a portion for evaluating our model in the ratio of 70:30 respectively from sklearn.model_selection import train_test_split # splitting our train data into train and validation x_train,x_eval,y_train,y_eval = train_test_split(X,y, test_size=0.3, stratify = y, shuffle=True,random_state=1) # + #import libraries for oversampling task #import imblearn #from pandas import DataFrame #from imblearn.over_sampling import SMOTE #smote = SMOTE(random_state =1) #x_train_bal,y_train_bal = smote.fit_sample(x_train,y_train) # + # Our data is numeric in nature so it would be imperative to scale the data for better performance by setting them to a mean values of 0s and std of 1s from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train_scaled = scaler.fit_transform(x_train) x_eval_scaled = scaler.transform(x_eval) # - # Before testing our model on other algorithms lets apply logistic Regression to help us map out features that contribute greatly to the possibilites of a customer defaulting their loans by using the coefficient plots. # + from sklearn.linear_model import LogisticRegression model = LogisticRegression() result = model.fit(x_train_scaled,y_train) y_pred = model.predict(x_eval_scaled) # - # print model coefficients and store in a list coefficients = result.coef_.tolist()[0] #create an empty dataframe to store values coefficients_df = pd.DataFrame() coefficients_df['Features'] = x_train.columns # create a new column called coefficients coefficients_df['Coefficient'] = coefficients # plot coefficient values coefficients_df= coefficients_df.sort_values(by='Coefficient', ascending=False) fig,ax = plt.subplots(figsize=(15,15)) sns.barplot(x='Coefficient',y='Features', data=coefficients_df) plt.title('Factors contributring to loan default') plt.xlabel('Logistic Regression Coefficients') plt.axvline(x=0, color='black') sns.despine() # The above chat shows the summary of how the features correlates with the target variable,on the left side indicates negative correlation meaning that increase these features leads to a decreased chance in the observation of customer loan default. 
With reverse being the case for the features on the right side with positive coeficients # ### Building Models # *I will be testing the following Algorithms for evaluation;* # # - Balanced RandomForest Classifier # - ExtraTreesClassifier # - XGB Boosting(Extreme Gradient Boosting Classifier) # - Lightgbm Classifer # - Decision Tree Classifier # + #importing neccessary libraries for building our model from sklearn.ensemble import ExtraTreesClassifier from imblearn.ensemble import BalancedRandomForestClassifier from sklearn.tree import DecisionTreeClassifier from catboost import CatBoostClassifier import lightgbm as lgb import xgboost as xgb # import neccessary libraries for result evaluation from sklearn.metrics import recall_score , accuracy_score from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score import sklearn.metrics as metrics from sklearn.metrics import classification_report from sklearn.metrics import plot_confusion_matrix import warnings warnings.filterwarnings('ignore') # - # storing our model in a list models = [ ['BalancedRandomForestClassifier:', BalancedRandomForestClassifier()], ['ExtraTreesClassifier:', ExtraTreesClassifier()], ['lightgbm:',lgb.LGBMClassifier()], ['DecisionTreeClassifier:', DecisionTreeClassifier()], ['Xgboost:', xgb.XGBClassifier()] ] model_data = [] for name,curr_model in models: curr_model_data = {} curr_model.random_state = 42 curr_model_data['Name'] = name curr_model.fit(x_train_scaled,y_train) # Measuring the model performance curr_model_data['Accuracy Score']= metrics.accuracy_score(y_eval,curr_model.predict(x_eval_scaled)) curr_model_data['Recall Score']= metrics.recall_score(y_eval,curr_model.predict(x_eval_scaled)) curr_model_data['roc_auc_score']= metrics.roc_auc_score(y_eval,curr_model.predict(x_eval_scaled)) # we try and measure the difference between the train set score and evaluated score to capture any signal of overfitting curr_model_data['Train Set Score'] = curr_model.score(x_train_scaled,y_train) curr_model_data['Test Set Score'] = curr_model.score(x_eval_scaled,y_eval) model_data.append(curr_model_data) # print our result model_data # storing the above result in a dataFrame for better visualization df = pd.DataFrame(model_data) df # As we can see the balanced random forest classifier has a better result than the rest of the model bearing in mind we are to predict the probabilities and not the actual labels of customers who default their loan. Prior to this point the probabilites have been predicted using the brfc as the model but,but it didnt do well on the test data as it gave a [LB_score of 0.8137].Which could mean the model overfitted and didnt capture the true negatives quite well. I will be selecting the best three models in the above result and try to improve it by applying polynomial features of degree 2. 
# ###### Metric Result evaluation # #### CatBoost Testing # + # assign categorical features #cat_features = X_copy.loc[:, ['form_field47','default_status']] #print(cat_features) # - from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import Pipeline # + # i will wrap some of the steps in a pipeline for easy application trans = PolynomialFeatures(degree=2) model1 = CatBoostClassifier( iterations =100, depth=8, random_seed = 42, learning_rate =0.1, eval_metric = 'AUC', #cat_features = cat_features, verbose = False ) # - cat_pipeline = Pipeline(steps=[('t', trans), ('m', model1)]) cat_pipeline.fit(x_train_scaled,y_train) cat_pipeline_pred = cat_pipeline.predict_proba(x_eval_scaled) # + cat_pipeline_roc_curve = roc_auc_score(y_eval, cat_pipeline.predict(x_eval_scaled)) fpr,tpr,thresholds = roc_curve(y_eval,cat_pipeline.predict_proba(x_eval_scaled)[:,1]) plt.figure() plt.plot(fpr,tpr,label='Catboost(area=%0.2f)' % cat_pipeline_roc_curve) plt.plot([0, 1],[0, 1],'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positve Rate') plt.title('Reciever Operating Characteristic') plt.legend(loc='lower right') plt.show() # - # ### Testing for lightgbm ## Applying polynomial features to lightgbm model model2 = lgb.LGBMClassifier() lgb_pipeline = Pipeline(steps=[('t', trans), ('m', model2)]) lgb_pipeline.fit(x_train_scaled,y_train) lgb_pipeline_pred = lgb_pipeline.predict_proba(x_eval_scaled) # + lgb_pipeline_roc_curve = roc_auc_score(y_eval, lgb_pipeline.predict(x_eval_scaled)) fpr,tpr,thresholds = roc_curve(y_eval,lgb_pipeline.predict_proba(x_eval_scaled)[:,1]) plt.figure() plt.plot(fpr,tpr,label='LightgbmClassifier(area=%0.2f)' % lgb_pipeline_roc_curve) plt.plot([0, 1],[0, 1],'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positve Rate') plt.title('Reciever Operating Characteristic') plt.legend(loc='lower right') plt.show() # - # ### Testing for BalancedRandomforest # Applying polynomial features to randomforest model3 = BalancedRandomForestClassifier(n_estimators=500,bootstrap=False,class_weight='balanced', random_state=0) brfc_pipeline = Pipeline(steps=[('t', trans), ('m', model3)]) brfc_pipeline.fit(x_train_scaled,y_train) brfc_pipeline_pred = brfc_pipeline.predict_proba(x_eval_scaled) # + brfc_pipeline_roc_curve = roc_auc_score(y_eval, brfc_pipeline.predict(x_eval_scaled)) fpr,tpr,thresholds = roc_curve(y_eval,brfc_pipeline.predict_proba(x_eval_scaled)[:,1]) plt.figure() plt.plot(fpr,tpr,label='Balanced Rfc(area=%0.2f)' % brfc_pipeline_roc_curve) plt.plot([0, 1],[0, 1],'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positve Rate') plt.title('Reciever Operating Characteristic') plt.legend(loc='lower right') plt.show() # - # The above confusion matrix is showing we have 9489+3062 correct prediction and 1053 + 3106 incorrect prediction. Now the baseline of our model is not to miss out on any clients who has a high probability of default their loan. 
So to improve this classification i will be selecting the top 9 features with negative coefficients and the top 9 features with a positive coefficients with the target variable to see how it performs on the test data # ### Apply full train to our Data # ###### Train data # + X_train = train.drop(columns=['Applicant_ID','default_status'],axis =1) y_train = train.default_status X_train_scaled = scaler.fit_transform(X_train) # - # ###### Test data # + X_test = test.drop(columns=['Applicant_ID'], axis =1) X_test_scaled = scaler.transform(X_test) # - cat_pipeline.fit(X_train_scaled,y_train) cat_pipeline_pred = cat_pipeline.predict_proba(X_test_scaled) # ###### load the submission file submission_df = pd.read_csv('./SampleSubmission.csv') submission_df.head() np.testing.assert_array_equal(test.index.values, submission_df.index.values) submission_df['default_status'] = cat_pipeline_pred[:, 1] submission_df.head() # saving the submission file to csv submission_df.to_csv('Loan_default_cat7_pipeline.csv', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf from tensorflow_model_optimization.quantization.keras import vitis_quantize import numpy as np from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator model = tf.keras.models.load_model('classifier.h5') quantizer = vitis_quantize.VitisQuantizer(model) test_datagen = ImageDataGenerator(rescale = 1./255) test_set = test_datagen.flow_from_directory('dataset/test_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') # + quantized_model = quantizer.quantize_model(calib_dataset = test_set) # - Quant_model_path = 'vitis-ai-quantized-cat_dog-classification-model.h5' # Save the model quantized_model.save(Quant_model_path) from tensorflow_model_optimization.quantization.keras import vitis_quantize with vitis_quantize.quantize_scope(): quantized_model = tf.keras.models.load_model(Quant_model_path) quantized_model quantized_model.summary() # !vai_c_tensorflow2 -h # !vai_c_tensorflow2 -m vitis-ai-quantized-cat_dog-classification-model.h5 -a /opt/vitis_ai/compiler/arch/DPUCADX8G/ALVEO/arch.json -o vitis-compiled/ -n cat_dog_XIR # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Discussion Activity: Pantries and Cooking # # ## Group Names and Roles # # - Partner 1 (Role) # - Partner 2 (Role) # - Partner 3 (Role) # # In this activity, we'll create a class for managing a pantry of food ingredients. This class will interact with the `recipe` class from a previous worksheet, allowing us to check whether we have enough ingredients to make the desired recipe. We'll also be able to add ingredients to the pantry, representing "going shopping." Schematically: # # $$\text{go shopping} \implies \text{ingredients in pantry} \implies \text{cook recipes}\;.$$ # ## Part A # # A `Pantry` is a subclass of `dict` that supports addition (with `dict`s) and subtraction (with `recipe`s). The code below implements a simple `Pantry` class with entrywise addition. If this code looks a bit familiar, that's because it is! 
This is just a rebranded `ArithmeticDict` from the [first lecture on inheritance](https://nbviewer.jupyter.org/github/PhilChodrow/PIC16A/blob/master/content/object_oriented_programming/inheritance_I.ipynb). # # Run this block. # # ***Note:*** *In a more thorough implementation of `__add__()` and subsequent methods, we would do input checking to ensure that we are dealing with dictionaries with integer or float values. Because we've already practiced input checking when we wrote the `recipe` class, we're not going to worry about that again here.* # + # run this block # used for warning for low ingredients (Part E) import warnings class Pantry(dict): """ A dictionary class that supports entrywise addition. """ # supplied to students def __add__(self, to_add): """ Add the contents of a dictionary to_add to self entrywise. Keys present in to_add but not in self are treated as though they are present in self with value 0. Similarly, keys present in self but not in to_add are treated as though they are present in to_add with value 0. """ new = {} keys1 = set(self.keys()) keys2 = set(to_add.keys()) all_keys = keys1.union(keys2) for key in all_keys: new.update({key : self.get(key,0) + to_add.get(key,0)}) return Pantry(new) # implement subtraction in Part B here # in Part B, you don't have to worry about # edge cases -- please read the directions! # additionally, no need for copy/paste. # it's ok to just modify this code block. def __sub__(self, recipe): pass # - # Let's say that we'd like to make some delicious chocolate chip cookies. But wait -- we don't have any chocolate chips in our pantry! (Run this block): my_pantry = Pantry({"flour (grams)" : 2000, "sugar (grams)" : 1000, "butter (grams)" : 500, "salt (grams)" : 1000}) # In the code cell below, use addition to add to your pantry. To do so, first make a `dict` called `grocery_trip` in which you buy: # # - 1000 grams of flour # - 500 grams of butter # - 500 grams of chocolate chips # - 2 onions # # The format should be the same as `my_pantry`. For example, `grocery_trip` might begin like this: # # ```python # grocery_trip = { # "flour (grams)" : 1000, # ... # } # ``` # # Then, add the contents of `grocery_trip` to `my_pantry`. Check the result to ensure that it makes sense. # your solution here # ## Part B # # Here is solution code for the `Recipe` class from last time. To simplify the code, we have removed the input checking in the `__init__` method, as well as the `__str__` method. class Recipe: def __init__(self, title, ingredients, directions): self.title = title self.ingredients = ingredients self.directions = directions def __rmul__(self, multiplier): multiplied_ingredients = {key : multiplier*val for key, val in self.ingredients.items()} return recipe(self.title, multiplied_ingredients, self.directions) # Now, implement **subtraction** in which the first argument is a `Pantry` and the second argument is a `Recipe`. The relevant magic method for this is called `__sub__()`, and should be implemented in the `Pantry` class. Here's how subtraction `my_pantry - my_recipe` should work: # # 1. You may assume that `my_pantry` and `my_recipe` are valid instances of their class. In particular, all quantities of ingredients are positive numbers (`int`s or `float`s). # 2. 
If all keys from `my_recipe.ingredients` are present in `my_pantry`, and if they all have values smaller than their values in `my_pantry`, then the result of `my_pantry - my_recipe` is a new `Pantry` object in which the values corresponding to the keys have been reduced by the quantity in `my_recipe`. # 3. If a key is present in `my_pantry` but not in `my_recipe`, then it is treated as though it is present in `my_pantry` with value `0`. # # For now, you can assume that the conditions of clause 2. are met, and that subtraction should therefore "work." That is, you can assume that you have enough of all ingredients in the pantry to make the recipe. For example, with `my_pantry` from Part A, # # ```python # title = "cookies" # ingredients = { # "flour (grams)" : 400, # "butter (grams)" : 200, # "salt (grams)" : 10, # "sugar (grams)" : 100 # } # # # Great British Baking Show-style directions # directions = ["make the cookies"] # # cookies = Recipe(title, ingredients, directions) # my_pantry - cookies # ``` # ``` # {'salt (grams)': 990, # 'flour (grams)': 2600, # 'butter (grams)': 800, # 'chocolate chips (grams)': 500, # 'sugar (grams)': 900, # 'onions': 2} # ``` # # You can implement subtraction by modifying the code block in Part A -- no need to copy/paste your class. # # ***Hint***: *Dictionary comprehensions* provide a convenient way to make new dictionaries from old ones. Their syntax is related to list comprehensions. For example: # ```python # d = {"shortbread cookie" : 2, "chocolate chip cookie" : 1} # {"tasty " + key : val for key, val in d.items()} # ``` # # ***Hint***: The method `dict.get()` will let you specify a "default" value in a dictionary, returned when a key is not found. For example, # ```python # {"cinnamon cookie" : 1, "florentine cookie" : 1}.get("brownie", 0) # ``` # will return value 0 because the key `"brownie"` is not found. # test your solution here # reminder: changes you make to your pantry class # won't be reflected until you run the definition # AND create an object with the new definition. # second reminder: you'll only get the same answer # we got above if you run the required code # exactly once. You might need to re-run your # code if you've already tried to run a few times. # ## Part D # # Now, handle the case in which `my_recipe` contains a key not contained in `my_pantry`, or in which case the associated value in `my_recipe` is larger. This models the situation in which your recipe requires an ingredient that you don't have, or that you don't have in sufficient quantity. # # In this case, `my_pantry - my_recipe` should raise an informative `ValueError`, stating which ingredients need to be added in order to make the recipe. For example, here's `my_pantry` as it was at the end of Part A. # # ```python # my_pantry = Pantry({'salt (grams)': 1000, # 'flour (grams)': 4000, # 'butter (grams)': 1500, # 'chocolate chips (grams)': 1000, # 'sugar (grams)': 1000, # 'onions': 2}) # # title = "oatmeal cookies" # ingredients = { # "oatmeal (grams)" : 200, # "flour (grams)" : 500, # "sugar (grams)" : 100, # "raisins (grams)" : 200, # "butter (grams)" : 200 # } # directions = ["make the cookies"] # # my_recipe = Recipe(title, ingredients, directions) # # my_pantry - my_recipe # ``` # # In this case, an informative `ValueError` should be raised indicating which ingredients are ndeed, and in what quantities. # # ***Hints*** # # Implementing this behavior efficiently can be a bit challenging. Here are some suggestions: # # 1. 
The command `set(L1).union(set(L2))` will create a set of all items in either `L1` or `L2`, where `L1` and `L2` are lists or other iterables. # 2. I found it helpful to first create a dictionary containing the full state of the pantry after subtraction, with negative numbers allowed. I then checked to see whether there were any negative numbers in this dictionary, raising the `ValueError` if so. # ## Part E # # If you have completed Parts A-D and still have some time, do one of two things: # # 1. Implement an `__str__()` method for your class to enable attractive printing. # 2. Modify the subtraction method so that a *warning* is shown when subtraction results in an ingredient running low. For the purposes of today, let's say that "low" means that there are fewer than 100g of the ingredient in the pantry. # # To issue warnings, you need to `import` the `warnings` module and then call `warnings.warn` with the text of the warning. # # ```python # import warnings # # ... other code # warnings.warn("Uh oh! These ingredients are running low.: ... ") # ``` # test your new features here # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- def my_function(x,y,z=1.5): if z > 1: return z * ( x + y ) else: return z / ( x + y ) my_function(5,6,z=0.) my_function(3.14,7,z=3.5) def f(): a = 5 b = 6 c = 7 return a , b , c a,b,c = f() print(a) print(b) print(c) import re def clean_strings(strings): result = [] for value in strings: value = value.strip() value = re.sub(r'[!#?]', '', value) value = value.title() result.append(value) return result states = [' Alabama ', 'Georgia!', 'Georgia', 'georgia', 'FlOrIda', 'south carolina##', 'West virginia?'] clean_strings(states) # + def remove_punctuation(value): return re.sub(r'[!#?]', '', value) clean_ops = [str.strip,remove_punctuation, str.title] def clean_strings1(strings, ops): result = [] for value in strings: for func in ops: value = func(value) result.append(value) return result # - clean_strings1(states, clean_ops) some_dict = {'a':1,'b':2,'c':3} for key in some_dict: print(key) def squares(n = 10): print("Generating squares from 1 to {0}".format(n**2)) for i in range(1,n+1): yield i**2 gen = squares() gen list(gen) gen = (x ** 2 for x in range(100)) gen list(gen) # ### itertools import itertools first_letter = lambda x: x[0] names = ['Alan', 'Adam', 'Wes', 'Will', 'Albert','Steven'] for letter , names in itertools.groupby(names, first_letter): print(letter, list(names)) # letter is the first letter of each name # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import networkx as nx g = nx.chordal_cycle_graph(97) g.nodes() T = nx.dfs_tree(g, 0, depth_limit=3) T.edges() g[0] tmp = [[0,u] for u in g[0]] [ [1, *v] for v in tmp ] def get_paths(graph, v, depth): if depth == 1: # return neighbors paths = [[v, u] for u in graph[v]] return(paths) else: # DFS paths = [] for n in graph[v]: subpaths = get_paths(graph, n, depth-1) for u in subpaths: paths.append([v, *u]) return(paths) paths=[] paths.extend(get_paths(g, 1, 2)) # + paths # - paths.extend(get_paths(g, 0, 2)) paths np.array(paths).shape import sparse sparse.COO(np.array(paths).T, ) n = g.number_of_nodes() tuple([n for i in range(3)]) 
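# The length-2 paths gathered above become the coordinates of a sparse third-order
# tensor: transposing the (num_paths, 3) array gives the (3, num_paths) coordinate
# layout that sparse.COO expects, and data=1 marks each path (v, u, w) with a one.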
sparse.COO(np.array(paths).T, data=1, shape=tuple([n for i in range(3)])) from maxnorm.graphs import * mask = obs_mask_expander(g, 4) mask # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # A mesh class for a tesseroid relief # # There is a `PrismRelief` mesh class in [Fatiando a Terra](http://www.fatiando.org/) but we still need a `TesseroidRelief` for this inversion. This is a mesh of tesseroids distributed along an area. They undulate below and above a reference level, describing the relief of an interface. Tesseroids have either the top of bottom fixed to a reference height. The other end undulates along a relief. The `TesseroidRelief` class is defined in the [`mohoinv.py`](mohoinv.py) module. This notebook will show some of the features of the mesh and how to use it. # ## Package imports # Insert plots into the notebook # %matplotlib inline from __future__ import division, unicode_literals import numpy as np import matplotlib.pyplot as plt from IPython.display import Image import multiprocessing import seaborn # Makes the default style of the plots nicer # Load the required modules from Fatiando a Terra and show the specific version of the library used. from fatiando import gridder, utils from fatiando.gravmag import tesseroid import fatiando print("Using Fatiando a Terra version: {}".format(fatiando.__version__)) from mohoinv import TesseroidRelief # ## Create some synthetic relief # Define a regular grid. # shape is nlat, nlon = the number of points in the grid shape = (41, 31) # Make a regular grid inside an area = (s, n, w, e) area = (20, 60, -40, 40) lat, lon, h = gridder.regular(area, shape, z=250e3) # The model area is slightly larger because the points generated above are in the center of each cell. dlat, dlon = gridder.spacing(area, shape) s, n, w, e = area modelarea = (s - dlat/2, n + dlat/2, w - dlon/2, e + dlon/2) # Make a checker board relief undulating along a specified height reference. f = 0.2 reference = -35e3 relief = 10e3*np.sin(0.5*f*lon)*np.cos(f*lat) + reference plt.figure(figsize=(7, 3)) plt.title('Synthetic relief') plt.axis('scaled') plt.pcolormesh(lon.reshape(shape), lat.reshape(shape), relief.reshape(shape), cmap="RdYlBu_r") plt.colorbar(pad=0.01).set_label('meters') plt.xlim(lon.min(), lon.max()) plt.ylim(lat.min(), lat.max()) plt.tight_layout() # Set a density contrast for the relief. The density contrast is negative if the relief is below the reference and positive otherwise. density = 600*np.ones_like(relief) density[relief < reference] *= -1 plt.figure(figsize=(7, 3)) plt.title('Relief density contrast') plt.axis('scaled') plt.pcolormesh(lon.reshape(shape), lat.reshape(shape), density.reshape(shape), cmap="RdBu_r") plt.colorbar(pad=0.01).set_label(u'kg/m³') plt.xlim(lon.min(), lon.max()) plt.ylim(lat.min(), lat.max()) plt.tight_layout() # Now we can create a mesh. sample_mesh = TesseroidRelief(modelarea, shape, relief, reference, {'density': density}) # ## Calculate the gravitational effect of this mesh # # The mesh behaves like a list of `Tesseroid` objects. So we can pass it to any function in Fatiando a Terra that takes such list as input. # # Below, we'll show an example of using the forward modeling functions in Fatiando to calculate the gravitational effect of the above relief. 
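# (The observation points are the `lon`, `lat`, `h` arrays generated earlier, at the 250 km computation height set when the grid was built; `tesseroid.gz` gives the vertical component of the gravitational attraction, plotted below in mGal, and `njobs` spreads the calculation over several processes.)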
ncpu = multiprocessing.cpu_count() print('Number of cores: {}'.format(ncpu)) # Forward model the data on the grid generated above using all available cores of the processor. data = tesseroid.gz(lon, lat, h, sample_mesh, njobs=ncpu) # The warnings above are because some small tesseroids (below 10 cm dimensions) are ignored to avoid numerical issues. plt.figure(figsize=(7, 3)) plt.title('Relief gravity anomaly') plt.axis('scaled') plt.tricontourf(lon, lat, data, 40, cmap="RdBu_r") plt.colorbar(pad=0.01).set_label(u'mGal') plt.xlim(lon.min(), lon.max()) plt.ylim(lat.min(), lat.max()) plt.tight_layout() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %config InlineBackend.figure_format = 'retina' # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import pandas as pd import bq_helper from bq_helper import BigQueryHelper # # When That Photo? # Goal of this project is to train a ML network to provide an estimate of when a photo was taken, where the estimate is a range of years (e.g., 1890-1900) # # Setup Access Via Big Query met = bq_helper.BigQueryHelper(active_project="bigquery-public-data", dataset_name="the_met") bq_assistant = BigQueryHelper("bigquery-public-data", "the_met") bq_assistant.list_tables() pd.set_option('display.max_rows', 150) bq_assistant.table_schema('objects') # ## Exploratory Queries for Photos images_url_query = """SELECT a.object_id, a.object_number, a.title, a.artist_display_name, a.object_name, a.object_date, a.object_begin_date, a.object_end_date, a.medium, a.dimensions, a.classification, b.cropImportanceFraction, b.cropConfidence, b.cropBB, b.cropHintsAnnotation FROM `bigquery-public-data.the_met.objects` a JOIN ( SELECT object_id, cropHintsAnnotation, cropHints.importanceFraction as cropImportanceFraction, cropHints.confidence as cropConfidence, cropHints.boundingPoly.vertices as cropBB FROM `bigquery-public-data.the_met.vision_api_data`, UNNEST(cropHintsAnnotation.cropHints) cropHints ) b ON a.object_id = b.object_id WHERE a.department = "Photographs" AND a.classification IN ("Transparencies", "Photographs", "Negatives") OR a.classification LIKE "Photographs%" """ images_url_response = met.query_to_pandas_safe(images_url_query, max_gb_scanned=20) images_url_response.head(100) images_url_response.to_csv(r'/data/kaggle/met/photographs-02-08-19.csv', index=None, sep=',', mode='a') # ## Photographs Query # Extract some basic info about the object from the `objects` table, and `JOIN` with the `images` table to get the relevant GCloud Storage pointer. 
# # Export all a subset of this data to a CSV to be used in a bash script for executing `gsutil cp` to download each image gcs_url_query = """ SELECT a.object_id, a.object_name, a.title, a.object_date, b.gcs_url FROM `bigquery-public-data.the_met.objects` a JOIN ( SELECT object_id, gcs_url FROM `bigquery-public-data.the_met.images` ) b ON a.object_id = b.object_id WHERE (a.department = "Photographs" AND a.classification IN ("Transparencies", "Photographs", "Negatives") OR a.classification LIKE "Photographs%") AND ends_with(lower(b.gcs_url), '/0.jpg') """ gcs_url_response = met.query_to_pandas_safe(gcs_url_query, max_gb_scanned=50) gcs_url_response.head(10) len(gcs_url_response) # Grab the `object_id` and `gcs_url` columns to be used by the bash download script gcs_url_response.loc[:, ['object_id','gcs_url']].to_csv(r'/data/kaggle/met/images.csv', header=None, index=None, sep=',', mode='a') # The bash download script handles the fact that trying to pipe a list of URLs to download into `gsutil cp` ignores the directory structure when downloading to the destination and places everything in the same folder (i.e., if any of the files have the same filename, they will overwrite each other). # # ## `gsutil` Download Script # # ``` # # #!/bin/bash # while IFS=, read -r col1 col2 # do # gsutil -m cp -r $col2 /data/kaggle/met/images/$col1 # done < /data/kaggle/met/images.csv # ``` # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('..') from src.carregamento.dados import todas_escolas_pd import pandas as pd medias = ['MEDIA_5EF_LP', 'MEDIA_5EF_MT', 'MEDIA_9EF_LP', 'MEDIA_9EF_MT'] todas_escolas_pd[todas_escolas_pd['ID_ESCOLA'] == 26121786][medias] todas_escolas_pd.groupby('NIVEL_SOCIO_ECONOMICO')[medias].describe() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="BTA6oniQR-GP" # # PyKoSpacing # # + colab={"base_uri": "https://localhost:8080/"} id="WITNZqN3Qv6R" outputId="465453ec-7d1b-473a-e8c3-127114327b38" # !pip install git+https://github.com/haven-jeon/PyKoSpacing.git # + id="0QVIFeG3Q15m" sent = '김철수는 극중 두 인격의 사나이 이광수 역을 맡았다. 철수는 한국 유일의 태권도 전승자를 가리는 결전의 날을 앞두고 10년간 함께 훈련한 사형인 유연재(김광수 분)를 찾으러 속세로 내려온 인물이다.' # + colab={"base_uri": "https://localhost:8080/"} id="xB8AVBUkReI-" outputId="ebfbab15-3abb-45dd-b3c8-5067b096cfa7" new_sent = sent.replace(" ", '') print(new_sent) # + colab={"base_uri": "https://localhost:8080/"} id="S8cM9fjvRiau" outputId="9cc9a51d-5911-496f-89c6-fea2392e1563" from pykospacing import spacing kospacing_sent = spacing(new_sent) print(sent) print(kospacing_sent) # + [markdown] id="ApmQdy30SCM3" # # Py-Hanspell # + colab={"base_uri": "https://localhost:8080/"} id="6WXhat4dRtFO" outputId="f3347433-deb3-4fdd-f0a8-d59f933f2c37" # !pip install git+https://github.com/ssut/py-hanspell.git # + colab={"base_uri": "https://localhost:8080/"} id="Od4JMp3XSF6O" outputId="c8110e5a-4717-47b1-ee72-c3bc6dfb6294" from hanspell import spell_checker sent = "맞춤법 틀리면 외 않되? 
쓰고싶은대로쓰면돼지 " spelled_sent = spell_checker.check(sent) hanspell_sent = spelled_sent.checked print(hanspell_sent) # + colab={"base_uri": "https://localhost:8080/"} id="IjXjiga7SUWm" outputId="1a46cea6-ee70-4cb6-fe92-259b1322d4f2" spelled_sent = spell_checker.check(new_sent) hanspell_sent = spelled_sent.checked print(hanspell_sent) print(kospacing_sent) # + [markdown] id="rcDtdQcLS7kn" # hanspell 결과에 대한 의견 : 띄어쓰기 체크 나쁘지 않음. 근데 왜 등장인물 이름이 바뀐거징? '유연재'가 잘못되었고 '유연제'를 단어로 인식한건가 # # + colab={"base_uri": "https://localhost:8080/"} id="ChYOXzp-StXV" outputId="0f29bc85-0e76-48dd-cc40-f0508fcd5618" # !pip install soynlp # + colab={"base_uri": "https://localhost:8080/"} id="JMDhR0gDUGN1" outputId="a11d0e9a-c1a2-4c48-ca7b-3d0e3796c325" pip install konlpy # + colab={"base_uri": "https://localhost:8080/"} id="a0zYpW1oUU73" outputId="e9b19c0e-415a-42e5-a709-325c18c98777" from konlpy.tag import Okt tokenizer = Okt() print(tokenizer.morphs('에이비식스 이대휘 1월 최애돌 기부 요정')) # + [markdown] id="2BKi-U_YUftV" # 신조어에 대해 분리된 결과를 나타냄. # 텍스트 데이터의 빈도수에 따라 형태소를 파악하는 토크나이저 soynlp로 분석한다면? # + id="y35xxHyyUY81" import urllib.request from soynlp import DoublespaceLineCorpus from soynlp.word import WordExtractor # + colab={"base_uri": "https://localhost:8080/"} id="x7IUVcebUyY0" outputId="746d5812-0bc6-47c1-f478-99250af4fb58" urllib.request.urlretrieve("https://raw.githubusercontent.com/lovit/soynlp/master/tutorials/2016-10-20.txt", filename="2016-10-20.txt") # + colab={"base_uri": "https://localhost:8080/"} id="pBGBl5snU3HF" outputId="98f3fdd8-187a-4e5b-bddc-68738912b47c" corpus = DoublespaceLineCorpus("2016-10-20.txt") len(corpus) # + colab={"base_uri": "https://localhost:8080/"} id="uOd4h6u1U7Lt" outputId="3e8ee100-b48f-4883-ced4-a8ec8aaedb80" i = 0 for document in corpus: if len(document) > 0: print(document) i = i + 1 if i == 3: break # + [markdown] id="JbniXmsNVNPK" # 학습 기반의 단어 토크나이저이기 때문에 기존의 형태소 분석기와는 다르게 학습 과정을 거쳐야 함. # # - 응집확률(Cohesion Probability) : 연결되어 자주 등장함 # - 브랜칭 엔트로피 : 주어진 문자열에서 얼마나 다음 문자가 등장할 수 있는지를 판단하는 척도임. # 하나의 완성된 단어에 가까워질수록 문맥으로 인해 점점 정확히 예측하면서 브랜칭 엔트로피의 값이 줄어듦. # + colab={"base_uri": "https://localhost:8080/"} id="GD5vol1BVETF" outputId="4f92450b-1789-481a-fc71-877a5df5c8a5" word_extractor = WordExtractor() word_extractor.train(corpus) word_score_table = word_extractor.extract() # + colab={"base_uri": "https://localhost:8080/"} id="xLBpymbRV3AE" outputId="0b7896c4-9b4e-469d-aaa7-7064d740998f" word_score_table["반포한"].cohesion_forward # + colab={"base_uri": "https://localhost:8080/"} id="vlyW1fpsWPWl" outputId="88a59e91-7bbd-4da5-d285-244c2eea55af" word_score_table["반포한강"].cohesion_forward # + colab={"base_uri": "https://localhost:8080/"} id="mJyhuVtoWSVX" outputId="f10e69fc-dce5-45be-c41c-b5b4984dc5e7" word_score_table["반포한강공"].cohesion_forward # + colab={"base_uri": "https://localhost:8080/"} id="2TA86pjXWZum" outputId="9fb366b9-bbdc-4d39-add2-f73d0e0c1c4d" word_score_table["반포한강공원"].cohesion_forward # + colab={"base_uri": "https://localhost:8080/"} id="sOcRCF16WcxP" outputId="3812b6d2-e721-40b1-9374-b49ecce5a31f" word_score_table["반포한강공원에"].cohesion_forward # + [markdown] id="UtiTkSGmo398" # 브랜칭 엔트로피 : 문자 시퀀스에서 다음 문자 예측을 위해 헷갈리는 정도. 
(줄어들면 좋겠쥬) # + colab={"base_uri": "https://localhost:8080/"} id="9_cjl9xRWdpm" outputId="acf3bed2-e5e1-48e4-d150-743c688ca362" word_score_table["디"].right_branching_entropy # + colab={"base_uri": "https://localhost:8080/"} id="o2mi6naOpKXw" outputId="a082214e-fff1-4f2b-d132-32c4e284b0cd" word_score_table["디스"].right_branching_entropy # + colab={"base_uri": "https://localhost:8080/"} id="8lJFv3ixpM2V" outputId="164131f5-03e6-4fe3-a60b-8978cbf7cbe4" word_score_table["디스플"].right_branching_entropy # + colab={"base_uri": "https://localhost:8080/"} id="wRz4KE5JpPFN" outputId="0a5e4669-d960-49bd-e4d3-e13684ce89f3" word_score_table["디스플레이"].right_branching_entropy # + colab={"base_uri": "https://localhost:8080/"} id="h4fI_JvtpSDd" outputId="1d5c3e96-0525-42af-ea81-5af845ef81fd" from soynlp.tokenizer import LTokenizer scores = { word:score.cohesion_forward for word, score in word_score_table.items()} l_tokenizer = LTokenizer(scores=scores) l_tokenizer.tokenize("국제사회와 우리의 노력들로 범죄를 척결하자", flatten=False) # + colab={"base_uri": "https://localhost:8080/"} id="RH9oQxgJqBO1" outputId="c1d38298-212d-4c92-ad0c-177b6207e4d4" from soynlp.tokenizer import MaxScoreTokenizer maxscore_tokenizer = MaxScoreTokenizer(scores=scores) maxscore_tokenizer.tokenize("국제사회와우리의노력들로범죄를척결하자") # + colab={"base_uri": "https://localhost:8080/"} id="MSeawbPyqgpD" outputId="3c809823-2e3a-4ab4-8d58-9bbc72b3337a" from soynlp.normalizer import * print(emoticon_normalize('앜ㅋㅋㅋㅋ이영화존잼쓰ㅠㅠㅠㅠㅠ', num_repeats=2)) print(emoticon_normalize('앜ㅋㅋㅋㅋㅋㅋㅋㅋㅋ이영화존잼쓰ㅠㅠㅠㅠ', num_repeats=2)) # + colab={"base_uri": "https://localhost:8080/"} id="zPg-QN8Vqq_L" outputId="c27de150-6a19-476d-bd3a-f09b340c55b5" print(repeat_normalize('와하하하하하하하하하핫', num_repeats=2)) # + colab={"base_uri": "https://localhost:8080/"} id="ER8PwBoiqykj" outputId="ab7295a4-8327-49f8-8f63-2a6396fd8fda" print(repeat_normalize('와하하하하하하핫', num_repeats=2)) # + colab={"base_uri": "https://localhost:8080/"} id="EcDdsNXwqzur" outputId="8fd446ff-2e5c-422d-9838-d6f6bdccc7e6" pip install customized_konlpy # + colab={"base_uri": "https://localhost:8080/"} id="0XVdIWW2rgpF" outputId="9db14f5e-c32d-48b6-a7e8-a13b4f4fb55c" from ckonlpy.tag import Twitter twitter = Twitter() twitter.morphs('은경이는 사무실로 갔습니다.') # + id="e04-2udprpmT" twitter.add_dictionary('은경이', 'Noun') # + colab={"base_uri": "https://localhost:8080/"} id="UDKzpfOHrr3L" outputId="013dec5f-ee3e-4130-c1c8-3ab04109277c" twitter.morphs('은경이는 사무실로 갔습니다.') # + id="Wvrrpd90rtX7" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hide_input=false pycharm={"name": "#%%\n"} from IPython.core.display import display, HTML, Markdown from ipywidgets import Button, HBox, VBox, widgets, Layout from tabulate import tabulate import copy # + hide_input=false pycharm={"name": "#%%\n"} campo_minato = [ [ 0 , 2 , 3 , 1 , 1 , 3 , 4 , 7 ,-1 ], [ 2 , 1 ,-1 , 2 ,-1 ,-1 , 7 , 1 , 2 ], [ 4 ,-1 , 2 , 3 , 7 , 1 , 1 , 5 , 4 ], [ 5 , 1 ,-1 , 4 , 5 ,-1 , 9 , 3 , 6 ], [ 1 , 3 , 3 ,-1 , 3 , 1 , 1 ,10 , 8 ], [ 1 , 4 ,-1 , 5 ,-1 , 3 , 1 , 8 , 9 ], [ 7 , 5 , 2 ,-1 , 2 , 2 , 3 , 4 , 0 ], ] m = len(campo_minato) n = len(campo_minato[0]) #mappa = [ [-1]*(n+1) ] + [ ([-1] + r) for r in campo_minatomappa = [ [-1]*(n+1) ] + [ ([-1] + r) for r in campo_minato#] mappa = [["*"]*(n+1)] for r in campo_minato: aux=["*"] for elem in r: aux.append("*") if elem==-1 else aux.append(elem) mappa.append(aux) # + 
hide_input=true def visualizza(env): if len(env)==m+1 and len(env[0])==n+1: index=[chr(65+i) for i in range(m)] aux=[r[1:] for r in env[1:]] if len(env)==m+2 and len(env[0])==n+2: index=[chr(65+i) for i in range(m)] aux=[r[1:-1] for r in env[1:-1]] for i in range(len(aux)): for j in range(len(aux[0])): if aux[i][j] is None: aux[i][j]="x" columns=[str(i) for i in range(1,n+1)] print(tabulate(aux, headers=columns, tablefmt='fancy_grid', showindex=index)) def evaluation_format(answ, pt_green,pt_red): pt_blue=0 if pt_green!=0: pt_blue=pt_red-pt_green pt_red=0 return f"{answ}. Totalizzeresti [{pt_green} safe pt], \ [{pt_blue} possible pt], \ [{pt_red} out of reach pt].
" def check_num_gems_to(mappa, num_gems_to, return_only_boolan=False): """ verifica che la matrice num_gems_to sia conforme alla consegna (perdonando solamente eventuale anomalia su cella (1,1) ). """ if len(num_gems_to) != m+1: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le righe della matrice $num\_gems\_to$ devono essere $m+1=${m+1}, non {len(num_gems_to)}." if len(num_gems_to[0]) != n+1: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le colonne della matrice $num\_gems\_to$ devono essere $n+1=${n+1}, non {len(num_gems_to[0])}." for i in range (0,m): if num_gems_to[i][0]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $(1,1)$ e pertanto $num\_gems\_to[${i}$][0] = 0$" for j in range (0,n): if num_gems_to[0][j]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $(1,1)$ e pertanto $num\_gemss\_to[0][${j}$] = 0$" num_gems_to_forgiving = copy.deepcopy(num_gems_to) num_gems_to_forgiving[1][1] = 0 for i in range(m,0,-1): for j in range (n,0,-1): if i==1 and j==1: if return_only_boolan: return True return evaluation_format("Si", 10, 10)+"Non riscontro particolari problemi della tua versione della matrice $num\_gems\_to$." if mappa[i][j]=="*" or (num_gems_to_forgiving[i][j-1] is None and num_gems_to_forgiving[i-1][j] is None): if num_gems_to_forgiving[i][j] is not None: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $num\_gems\_to$." else: if num_gems_to_forgiving[i][j]!=max((num_gems_to_forgiving[i-1][j] if num_gems_to_forgiving[i-1][j] is not None else 0),(num_gems_to_forgiving[i][j-1] if num_gems_to_forgiving[i][j-1] is not None else 0)) + mappa[i][j]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $num\_gems\_to$." def check_max_gems_to(mappa, max_gems_to, return_only_boolan=False): """ verifica che la matrice max_gems_to_with_opt sia conforme alla consegna. """ if len(max_gems_to) != m+1: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le righe della matrice $max\_gems\_to$ devono essere $m+1=${m+1}, non {len(max_gems_to)}." if len(max_gems_to[0]) != n+1: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le colonne della matrice $max\_gems\_to$ devono essere $n+1=${n+1}, non {len(max_gems_to[0])}." for i in range (0,m): if max_gems_to[i][0][0]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $(1,1)$ e pertanto $max\_gems\_to\_with\_opt[${i}$][0] = 0$" for j in range (0,n): if max_gems_to[0][j][0]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $(1,1)$ e pertanto $max\_gems\_to[0][${j}$] = 0$" max_gems_to_forgiving = copy.deepcopy(max_gems_to) max_gems_to_forgiving[1][1] = (0,1) for i in range(m,0,-1): for j in range (n,0,-1): if i==1 and j==1: if return_only_boolan: return True return evaluation_format("Si", 10, 10)+"Non riscontro particolari problemi della tua versione della matrice $max\_gems\_to$." 
if mappa[i][j]=="*" or (max_gems_to_forgiving[i][j-1] is None and max_gems_to_forgiving[i-1][j] is None): if max_gems_to_forgiving[i][j] is not None: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." else: if max_gems_to_forgiving[i][j][0]!=max((max_gems_to_forgiving[i-1][j][0] if max_gems_to_forgiving[i-1][j] is not None else 0),(max_gems_to_forgiving[i][j-1][0] if max_gems_to_forgiving[i][j-1] is not None else 0)) + mappa[i][j]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." if max_gems_to_forgiving[i-1][j] is None: if max_gems_to_forgiving[i][j][1]!=max_gems_to_forgiving[i][j-1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." elif max_gems_to_forgiving[i][j-1] is None: if max_gems_to_forgiving[i][j][1]!=max_gems_to_forgiving[i-1][j][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." else: if max_gems_to_forgiving[i-1][j][0]==max_gems_to_forgiving[i][j-1][0]: if max_gems_to_forgiving[i][j][1]!=max_gems_to_forgiving[i-1][j][1]+max_gems_to_forgiving[i][j-1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." elif max_gems_to_forgiving[i-1][j][0]>max_gems_to_forgiving[i][j-1][0]: if max_gems_to_forgiving[i][j][1]!=max_gems_to_forgiving[i-1][j][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." else: if max_gems_to_forgiving[i][j][1]!=max_gems_to_forgiving[i][j-1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_to$." def check_num_gems_from(mappa, num_gems_from, return_only_boolan=False): """ verifica che la matrice num_gems_from sia conforme alla consegna. """ if len(num_gems_from) != m+2: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le righe della matrice $num\_gems\_from$ devono essere $m+2=${m+2}, non {len(num_gems_from)}." if len(num_gems_from[0]) != n+2: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le colonne della matrice $num\_gems\_from$ devono essere $n+2=${n+2}, non {len(num_gems_from[0])}." 
for i in range (0,m+1): if num_gems_from[i][n+1]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $({m},{n})$ e pertanto $num\_gems\_from[${i}$][${n}$] = 0$" for j in range (0,n+1): if num_gems_from[m+1][j]!=0: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $({m},{n})$ e pertanto $num\_gems\_from[${m}$][${j}$] = 0$" num_gems_from_forgiving = copy.deepcopy(num_gems_from) num_gems_from_forgiving[m][n] = 0 for i in range(1,m): for j in range (1,n): if mappa[i][j]=="*" or (num_gems_from_forgiving[i][j+1] is None and num_gems_from_forgiving[i+1][j] is None): if num_gems_from_forgiving[i][j] is not None: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $num\_gems\_from$." else: if num_gems_from_forgiving[i][j]!=max((num_gems_from_forgiving[i+1][j] if num_gems_from_forgiving[i+1][j] is not None else 0),(num_gems_from_forgiving[i][j+1] if num_gems_from_forgiving[i][j+1] is not None else 0)) + mappa[i][j]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $num\_gems\_from$." if return_only_boolan: return True return evaluation_format("Si", 10, 10)+"Non riscontro particolari problemi della tua versione della matrice $num\_gems\_from$." def check_max_gems_from(mappa, max_gems_from, return_only_boolan=False): """ verifica che la matrice max_gems_from_with_opt sia conforme alla consegna. """ if len(max_gems_from) != m+2: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le righe della matrice $max\_gems\_from$ devono essere $m+2=${m+2}, non {len(max_gems_from)}." if len(max_gems_from[0]) != n+2: if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Le righe della matrice $max\_gems\_from$ devono essere $m+2=${m+2}, non {len(max_gems_from)}." for i in range (0,m+1): if max_gems_from[i][n+1]!=(0,0): if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $({m},{n})$ e pertanto $max\_gems\_from[${i}$][${n}$] = 0$" for j in range (0,n+1): if max_gems_from[m+1][j]!=(0,0): if return_only_boolan: return False return evaluation_format("No", 0, 10)+f"Attenzione, la raccolta delle gemme deve partire dalla cella $({m},{n})$ e pertanto $max\_gems\_from[${m}$][${j}$] = 0$" max_gems_from_forgiving = copy.deepcopy(max_gems_from) max_gems_from_forgiving[m][n] = (0,1) for i in range(1,m): for j in range (1,n): if mappa[i][j]=="*" or (max_gems_from_forgiving[i][j+1] is None and max_gems_from_forgiving[i+1][j] is None): if max_gems_from_forgiving[i][j] is not None: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." else: if max_gems_from_forgiving[i][j][0]!=max((max_gems_from_forgiving[i+1][j][0] if max_gems_from_forgiving[i+1][j] is not None else 0),(max_gems_from_forgiving[i][j+1][0] if max_gems_from_forgiving[i][j+1] is not None else 0)) + mappa[i][j]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." 
if max_gems_from_forgiving[i+1][j] is None: if max_gems_from_forgiving[i][j][1]!=max_gems_from_forgiving[i][j+1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." elif max_gems_from_forgiving[i][j+1] is None: if max_gems_from_forgiving[i][j][1]!=max_gems_from_forgiving[i+1][j][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." else: if max_gems_from_forgiving[i+1][j][0]==max_gems_from_forgiving[i][j+1][0]: if max_gems_from_forgiving[i][j][1]!=max_gems_from_forgiving[i+1][j][1]+max_gems_from_forgiving[i][j+1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." elif max_gems_from_forgiving[i+1][j][0]>max_gems_from_forgiving[i][j+1][0]: if max_gems_from_forgiving[i][j][1]!=max_gems_from_forgiving[i+1][j][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." else: if max_gems_from_forgiving[i][j][1]!=max_gems_from_forgiving[i][j+1][1]: if return_only_boolan: return False return evaluation_format("No", 0, 10)+"Ti avviso: riscontro dei problemi nella tua versione della matrice $max\_gems\_from$." if return_only_boolan: return True return evaluation_format("Si", 10, 10)+"Non riscontro particolari problemi della tua versione della matrice $max\_gems\_from$." def Latex_type(string): return string.replace("_", "\_") def visualizza_e_valuta(nome_matrice, matrice): display(Markdown(f"La tua versione attuale della matrice ${Latex_type(nome_matrice)}$ è la seguente:")) visualizza(matrice) display(Markdown(f"Validazione della tua matrice ${Latex_type(nome_matrice)}$:")) display(Markdown(eval(f"check_{nome_matrice}(mappa,matrice)"))) # + #unit test num_gems_to=[ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 5, 6, 7, 10, 14, 21, None], [0, 2, 3, None, 8, None, None, 21, 22, 24], [0, 6, None, None, 11, 18, 19, 22, 27, 31], [0, 11, 12, None, 15, 23, None, 31, 34, 40], [0, 12, 15, 18, None, 26, 27, 32, 44, 52], [0, 13, 19, None, None, None, 30, 33, 52, 61], [0, 20, 25, 27, None, None, 32, 36, 56, 61] ] num_gems_from=[ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 61, 61, 59, 56, 55, 54, 51, 43, None, 0], [0, 27, None, None, 53, None, None, 47, 36, 29, 0], [0, 25, None, 53, 51, 48, 41, 40, 35, 27, 0], [0, 21, 15, None, 41, 37, None, 39, 30, 23, 0], [0, 16, 14, None, None, 32, 29, 28, 27, 17, 0], [0, 15, 11, None, None, None, 21, 18, 17, 9, 0], [0, 14, 7, 2, None, 11, 9, 7, 4, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] max_gems_to=[ [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)], [(0, 0), (0, 0), (2, 1), (5, 1), (6, 1), (7, 1), (10, 1), (14, 1), (21, 1), None], [(0, 0), (2, 1), (3, 2), None, (8, 1), None, None, (21, 1), (22, 2), (24, 2)], [(0, 0), (6, 1), None, None, (11, 1), (18, 1), (19, 1), (22, 1), (27, 3), (31, 3)], [(0, 0), (11, 1), (12, 1), None, (15, 1), (23, 1), None, (31, 1), (34, 1), (40, 1)], [(0, 0), (12, 1), (15, 2), (18, 2), None, (26, 1), (27, 1), (32, 1), (44, 1), (52, 1)], [(0, 0), (13, 1), (19, 2), None, None, None, (30, 1), (33, 1), (52, 1), (61, 2)], [(0, 0), (20, 1), (25, 1), (27, 1), None, None, (32, 1), (36, 1), (56, 1), (61, 2)] ] max_gems_from=[ [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 
0), (0, 0), (0, 0)], [(0, 0), (61, 2), (61, 2), (59, 2), (56, 2), (55, 2), (54, 2), (51, 2), (43, 2), None, (0, 0)], [(0, 0), (27, 1), None, None, (53, 2), None, None, (47, 2), (36, 2), (29, 1), (0, 0)], [(0, 0), (25, 1), None, (53, 2), (51, 2), (48, 2), (41, 2), (40, 2), (35, 2), (27, 1), (0, 0)], [(0, 0), (21, 1), (15, 1), None, (41, 2), (37, 2), None, (39, 2), (30, 2), (23, 1), (0, 0)], [(0, 0), (16, 1), (14, 1), None, None, (32, 2), (29, 2), (28, 2), (27, 2), (17, 1), (0, 0)], [(0, 0), (15, 1), (11, 1), None, None, None, (21, 1), (18, 1), (17, 1), (9, 1), (0, 0)], [(0, 0), (14, 1), (7, 1), (2, 1), None, (11, 1), (9, 1), (7, 1), (4, 1), (0, 0), (0, 0)], [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)] ] assert (check_num_gems_to(mappa, num_gems_to, return_only_boolan=True) == True) assert (check_num_gems_from(mappa, num_gems_from, return_only_boolan=True) == True) assert (check_max_gems_to(mappa, max_gems_to, return_only_boolan=True) == True) assert (check_max_gems_from(mappa, max_gems_from, return_only_boolan=True) == True) # - # ## Esercizio \[60 pts\] # (campo minato con gemme) Ricerca di cammini in una griglia rettangolare con celle proibite e gemme da raccogliere. # Bimo cammina sulle celle di un campo minato dalla forma di una griglia rettangolare $m\times n$.\ # Le mine sono indicate da un -1 mentre le altre celle che contengono un numero intero >0 # sono tutte transitabili (il numero indica il numero di monete in quella cella).\ # Le mosse consentite portano Bimo dalla cella $(i,j)$ alla cella $(i+1,j)$ oppure $(i,j+1)$, sempre ove queste siano transitabili.\ # Organizzati per calcolare quante monete riesce a raccogliere Bimo tra due celle date e per rispondere ad altre domande di questo tipo. # Notice: Anche se ne hai quì ogni opportunità, non ti è però richiesto in alcun modo di scrivere del codice per condurre a termine il tuo esercizio. Puoi fare tutto a mano e vogliamo essere chiari che noi non facciamo alcuna differenza tra i punti conquistati in un modo piuttosto che in un altro (noi guardiamo ai risultati e ci piace che voi vi ingegniate a modo vostro per portarli a casa, in tutta libertà). Sei incoraggiato piuttosto a ricercare l'approccio per tè più pratico, sicuro, e conveniente. E magari quello che puoi trovare più piacevole e stimolante quando svolgi l'esercizio da casa, dove ti suggerisco sperimentare, potrebbe anche essere diverso . # Ciò nononostante, per facilitare chi di voi volesse scrivere del codice a proprio supporto, abbiamo aggiunto alla mappa di $m$ righe ed $n$ colonne una riga e colonna iniziale (di indice zero), fatte interamente di mine, perchè non si crei confusione col fatto che gli indici di liste ed array in programmazione partono da zero. # Un robot, inizialmente situato nella cella $A1=(1,1)$, deve portarsi nella cella $G9=(7,9)$. # Le celle che riportano un numero negativo contengono una mina od altre trapole mortali, ed il robot deve evitarle. Ogni altra cella contiene il numero di monete rappresentato nella tabella.\ # I movimenti base possibili sono il passo verso destra (ad esempio il primo passo potrebbe avvenire dalla cella $A1$ alla cella $A2$) ed il passo verso il basso (ad esempio, come unica altra alternativa per il primo passo il robot potrebbe posrtarsi quindi nella cella $B1$).\ # Quante monete può raccogliere al massimo il robot in un percorso che vada dalla cella $A1$ alla cella $G9$?\ # E quanti sono i percorsi che gli consentono di raccogliere un tale numero di monete? 
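# A minimal sketch (optional, just one possible approach) of the helper `conta_gemme` that appears commented out in the cells below: it builds $num\_gems\_to$ by dynamic programming, using the same recurrence that `check_num_gems_to` validates, and assumes the globals `mappa`, `m` and `n` defined above. The exercise does not require any code.
# +
def conta_gemme(mappa):
    """Build num_gems_to by dynamic programming (same recurrence checked by check_num_gems_to)."""
    gems = [[0] * (n + 1) for _ in range(m + 1)]   # row 0 and column 0 stay at 0
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if i == 1 and j == 1:
                # starting cell: the check function forgives its exact value
                gems[1][1] = mappa[1][1] if mappa[1][1] != "*" else None
                continue
            up, left = gems[i - 1][j], gems[i][j - 1]
            if mappa[i][j] == "*" or (up is None and left is None):
                gems[i][j] = None                  # mine, or cell unreachable from (1,1)
            else:
                gems[i][j] = max(up if up is not None else 0,
                                 left if left is not None else 0) + mappa[i][j]
    return gems
# -
# The reverse matrix $num\_gems\_from$ follows from the mirrored recurrence: scan from $(m,n)$ back towards $(1,1)$, looking at the cells below and to the right instead of above and to the left.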
# + hide_input=true visualizza(mappa) # - # __Richieste__: # \[10 pts\]__ A mano o tramite un programma componi la matrice $num\_gems\_to$ di dimensione $(m+1)\times(n+1)$, nella cui cella $num\_gems\_to[i][j]$, per ogni $i = 0,..., m+1$ e $j = 0,..., n+1$, sia riposto il massimo numero di gemme incontrate da un cammino dalla cella $A1=(1,1)$ alla generica cella $(i,j)$. Se non vi è alcun cammino dalla cella $A1=(1,1)$ alla generica cella $(i,j)$ poni allora $num\_gems\_to[i][j]$ a $None$. num_gems_to=[ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 5, 6, 7, 10, 14, 21, None], [0, 2, 3, None, 8, None, None, 21, 22, 24], [0, 6, None, None, 11, 18, 19, 22, 27, 31], [0, 11, 12, None, 15, 23, None, 31, 34, 40], [0, 12, 15, 18, None, 26, 27, 32, 44, 52], [0, 13, 19, None, None, None, 30, 33, 52, 61], [0, 20, 25, 27, None, None, 32, 36, 56, 61] ] #num_gems_to=conta_gemme(mappa) # + hide_input=false visualizza_e_valuta("num_gems_to",num_gems_to) # + [markdown] hide_input=true # __\[10 pts\]__ Componi ora una matrice $num\_gems\_from$, di dimensione $(m+2)times(n+2)$, nella cui cella $num\_gems\_from[i][j]$, per ogni $i = 1,..., m+1$ e $j = 1,..., n+1$, sia riposto il numero di gemme raccolte dalla generica cella $(i,j)$ alla cella $G9=(7,9)$. # - num_gems_from=[ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 61, 61, 59, 56, 55, 54, 51, 43, None, 0], [0, 27, None, None, 53, None, None, 47, 36, 29, 0], [0, 25, None, 53, 51, 48, 41, 40, 35, 27, 0], [0, 21, 15, None, 41, 37, None, 39, 30, 23, 0], [0, 16, 14, None, None, 32, 29, 28, 27, 17, 0], [0, 15, 11, None, None, None, 21, 18, 17, 9, 0], [0, 14, 7, 2, None, 11, 9, 7, 4, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] #num_gems_from=conta_gemme_rev(mappa) # + hide_input=true visualizza_e_valuta("num_gems_from",num_gems_from) # - # __\[10 pts\]__ A mano o tramite un programma componi la matrice $max\_gems\_to$ di dimensione $(m+1)\times(n+1)$, nella cui cella $max\_gems\_to[i][j]$, per ogni $i = 0,..., m+1$ e $j = 0,..., n+1$, sia riposto il numero di gemme raccolte dalla cella $A1=(1,1)$ e il numero di percorsi che assicurano di raccogliere quel numero di gemme alla generica cella $(i,j)$. max_gems_to=[ [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)], [(0, 0), (0, 0), (2, 1), (5, 1), (6, 1), (7, 1), (10, 1), (14, 1), (21, 1), None], [(0, 0), (2, 1), (3, 2), None, (8, 1), None, None, (21, 1), (22, 2), (24, 2)], [(0, 0), (6, 1), None, None, (11, 1), (18, 1), (19, 1), (22, 1), (27, 3), (31, 3)], [(0, 0), (11, 1), (12, 1), None, (15, 1), (23, 1), None, (31, 1), (34, 1), (40, 1)], [(0, 0), (12, 1), (15, 2), (18, 2), None, (26, 1), (27, 1), (32, 1), (44, 1), (52, 1)], [(0, 0), (13, 1), (19, 2), None, None, None, (30, 1), (33, 1), (52, 1), (61, 2)], [(0, 0), (20, 1), (25, 1), (27, 1), None, None, (32, 1), (36, 1), (56, 1), (61, 2)] ] #max_gems_to=conta_gemme_with_opt(mappa) # + hide_input=false visualizza_e_valuta("max_gems_to",max_gems_to) # - # __\[10 pts\]__ Componi ora una matrice $max\_gems\_from$, di dimensione $(m+2)times(n+2)$, nella cui cella $max\_gems\_from[i][j]$, per ogni $i = 1,..., m+1$ e $j = 1,..., n+1$, sia riposto il numero di gemme raccolte dalla generica cella $(i,j)$ alla cella $G9=(7,9)$ e il numero di percorsi che assicurano di raccogliere quel numero di gemme. 
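# A minimal sketch (again optional) of the reverse helper with path counting, `conta_gemme_rev_with_opt`, referenced commented out in the next cell: every entry is a pair (maximum gems collectable from the cell down to $(m,n)$, number of paths achieving that maximum), following the recurrence enforced by `check_max_gems_from` and assuming the same globals `mappa`, `m`, `n`.
# +
def conta_gemme_rev_with_opt(mappa):
    """Build max_gems_from: each cell holds (max gems from (i,j) to (m,n), number of optimal paths)."""
    best = [[(0, 0)] * (n + 2) for _ in range(m + 2)]   # border row m+1 / column n+1 stays (0, 0)
    for i in range(m, 0, -1):
        for j in range(n, 0, -1):
            if i == m and j == n:
                # target cell: the check function forgives its exact value; use
                # zero extra gems and a single (empty) optimal path
                best[i][j] = (0, 1)
                continue
            down, right = best[i + 1][j], best[i][j + 1]
            if mappa[i][j] == "*" or (down is None and right is None):
                best[i][j] = None                        # mine, or cell that cannot reach (m, n)
                continue
            g_down = down[0] if down is not None else 0
            g_right = right[0] if right is not None else 0
            gems = max(g_down, g_right) + mappa[i][j]
            if down is None:
                count = right[1]
            elif right is None:
                count = down[1]
            elif g_down == g_right:
                count = down[1] + right[1]
            elif g_down > g_right:
                count = down[1]
            else:
                count = right[1]
            best[i][j] = (gems, count)
    return best
# -
# Ties (equal gems below and to the right) add the two path counts together, which is how the pair $(61, 2)$ in cell $A1$ arises.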
# + #max_gems_from=conta_gemme_rev_with_opt(mappa) max_gems_from=[ [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)], [(0, 0), (61, 2), (61, 2), (59, 2), (56, 2), (55, 2), (54, 2), (51, 2), (43, 2), None, (0, 0)], [(0, 0), (27, 1), None, None, (53, 2), None, None, (47, 2), (36, 2), (29, 1), (0, 0)], [(0, 0), (25, 1), None, (53, 2), (51, 2), (48, 2), (41, 2), (40, 2), (35, 2), (27, 1), (0, 0)], [(0, 0), (21, 1), (15, 1), None, (41, 2), (37, 2), None, (39, 2), (30, 2), (23, 1), (0, 0)], [(0, 0), (16, 1), (14, 1), None, None, (32, 2), (29, 2), (28, 2), (27, 2), (17, 1), (0, 0)], [(0, 0), (15, 1), (11, 1), None, None, None, (21, 1), (18, 1), (17, 1), (9, 1), (0, 0)], [(0, 0), (14, 1), (7, 1), (2, 1), None, (11, 1), (9, 1), (7, 1), (4, 1), (0, 0), (0, 0)], [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)] ] # + hide_input=true visualizza_e_valuta("max_gems_from",max_gems_from) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} # Copyright (c) 2017-2022 Reveal Energy Services, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is part of Orchid and related technologies. # # - # # Example: Plotting well trajectories # This notebook illustrates using the Orchid* Python API and the `matplotlib` package to # plot well trajectories. # # (*Orchid is a mark of Reveal Energy Services, Inc) # ## 0.5 Import packages # The only import needed for the Orchid Python API is `orchid` itself. # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} import orchid # - # The remaining imports are standard python packages to support the analysis. # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} import matplotlib.pyplot as plt # The following import is included for its "side-effects" of an improved color schemes and # plot styles. (See the "Tip" in section 9.2 of "Python for Data Analysis" for details.) import seaborn as sns # - # ## 1.0 Load the .ifrac project # + [markdown] pycharm={"name": "#%% md\n"} # The following code simply captures the configured location of the Orchid training data. It is not needed to # use the Orchid Python API itself, but it is used in this example to load well-known data. 
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} orchid_training_data_path = orchid.training_data_path() # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} project = orchid.load_project(str(orchid_training_data_path.joinpath( 'Project-frankNstein_Montney_UTM13_METERS.ifrac'))) # + [markdown] pycharm={"name": "#%% md\n"} # ### 1.1 Get the trajectories for each well of the project # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} all_wells = project.wells().all_objects() wells = dict([(w.name, w) for w in all_wells]) trajectories = {wn: w.trajectory for (wn, w) in wells.items()} # + [markdown] pycharm={"name": "#%% md\n"} # ### 1.2 Get the eastings ond northings in the project reference frame # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} eastings = {wn: t.get_easting_array(orchid.WellReferenceFrameXy.PROJECT) for (wn, t) in trajectories.items()} northings = {wn: t.get_northing_array(orchid.WellReferenceFrameXy.PROJECT) for (wn, t) in trajectories.items()} # + [markdown] pycharm={"name": "#%% md\n"} # ## 2.0 Plot the eastings and northings for each trajectory # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} default_well_colors = project.default_well_colors() for (i, wn) in enumerate(wells.keys()): plt.plot(eastings[wn], northings[wn], label=f'{wells[wn].display_name}', color=default_well_colors[i % len(default_well_colors)]) plt.title(f'{project.name} Well Trajectories (Project Coordinates)') plt.legend(loc='best') plt.xlabel(f'Easting ({orchid.abbreviation(project.project_units.LENGTH)})') plt.ylabel(f'Northing ({orchid.abbreviation(project.project_units.LENGTH)})') plt.rcParams['figure.dpi'] = 150 plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="5gyFPhPWJev9" # ##### Copyright 2021 Google LLC. All Rights Reserved. # + id="LPGlYwKdJP3o" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="nGMkkZI9gGVD" # #**RLDS: Examples** # This colab provides some examples of RLDS usage based on real use cases. If you are looking for an introduction to RLDS, see the [RLDS tutorial](https://colab.research.google.com/github/google-research/rlds/blob/main/rlds/examples/rlds_tutorial.ipynb) in Google Colab. # + [markdown] id="WB0cAjdfrPXM" # # # #
# + [markdown] id="36WPcDI8lVPI" # #Install Modules # + executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1630078222348, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="LvD0ZsudlZVO" # !pip install rlds # !pip install tfds-nightly --upgrade # !pip install envlogger # !apt-get install libgmp-dev # + [markdown] id="tErv4WRmgTjE" # ##Import Modules # + id="ysYC-fdKjO3r" import functools import rlds import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds # + [markdown] id="9gwFBSgGjO3s" # #Load dataset # We can load the human dataset from the Panda Pick Place Can task of the [Robosuite collection in TFDS](https://www.tensorflow.org/datasets/catalog/overview#rlds). In these examples, we are assuming that certain fields are present in the steps, so datasets from different tasks will not be compatible. # + id="rUPVer19cCqG" dataset_config = 'human_dc29b40a' # @param { isTemplate : true} dataset_name = f'robosuite_panda_pick_place_can/{dataset_config}' num_episodes_to_load = 30 # @param { isTemplate: true} # + [markdown] id="hYFW_3X52y-h" # # Learning from Demonstrations or Offline RL # # We consider the setup where an agent needs to solve a task specified by a reward $r$. We assume a dataset of episodes with the corresponding rewards is available for training. This includes: # * The ORL setup [[1], [2] [3]] where the agent is trained solely from a dataset of episodes collected in the environment. # * The LfD setup [[4], [5], [6], [7]] where the agent can also interact with the environment. # # Using one of the two provided datasets on the Robosuite PickPlaceCan environment, a typical RLDS pipeline would include the following steps: # # 1. sample $K$ episodes from the dataset so the performance of the trained agent could be expressed as a function of the number of available episodes. # 1. combine the observations used as an input of the agent. The Robosuite datasets include many fields in the observations and one could try to train the agent from the state or form the visual observations for example. # 1. finally, convert the dataset of episodes into a dataset of transitions that can be consumed by algorithms such as SAC or TD3. 
# # [1]:(https://arxiv.org/abs/2005.01643) # [2]:(https://arxiv.org/abs/1911.11361) # [3]:(https://arxiv.org/abs/2103.01948) # [4]:(https://arxiv.org/abs/1909.01387) # [5]:(https://arxiv.org/abs/1704.03732) # [6]:(https://arxiv.org/abs/1707.08817) # [7]:(https://arxiv.org/abs/2006.12917) # + id="HAnUJzN03eij" K = 5 # @param { isTemplate: true} buffer_size = 30 # @param { isTemplate: true} # + id="pol7u-_D3Bwo" dataset = tfds.load(dataset_name, split=f'train[:{num_episodes_to_load}]') dataset = dataset.shuffle(buffer_size, seed=42, reshuffle_each_iteration=False) dataset = dataset.take(K) def prepare_observation(step): """Filters the obseravtion to only keep the state and flattens it.""" observation_names = ['robot0_proprio-state', 'object-state'] step[rlds.OBSERVATION] = tf.concat( [step[rlds.OBSERVATION][key] for key in observation_names], axis=-1) return step dataset = rlds.transformations.map_nested_steps(dataset, prepare_observation) def batch_to_transition(batch): """Converts a pair of consecutive steps to a custom transition format.""" return {'s_cur': batch[rlds.OBSERVATION][0], 'a': batch[rlds.ACTION][0], 'r': batch[rlds.REWARD][0], 's_next': batch[rlds.OBSERVATION][1]} def make_transition_dataset(episode): """Converts an episode of steps to a dataset of custom transitions.""" # Create a dataset of 2-step sequences with overlap of 1. batched_steps = rlds.transformations.batch(episode[rlds.STEPS], size=2, shift=1) return batched_steps.map(batch_to_transition) transitions_ds = dataset.flat_map(make_transition_dataset) # + [markdown] id="pWNhxwJzOUJv" # # Absorbing Terminal States in Imitation Learning # # Imitation learning is the setup where an agent tries to imitate a behavior, as defined by some sample episodes of that behavior. # In particular, the reward is not specified. # # The dataset processing pipeline requires all the different pieces seen in the learning from demonstrations setup (create a train split, assemble the observation, ...) but also has some specifics. # One specific is related to the particular role of the terminal state in imitation learning. # While in standard RL tasks, looping over the terminal states only brings zero in terms of reward, in imitation learning, making this assumption of zero reward for transitions from a terminal state to the same terminal state induces some bias in algorithms like GAIL. # One way to counter this bias was proposed in [1]. It consists in learning the reward value of the transition from the absorbing state to itself. # Implementation wise, to tell a terminal state from another state, an `absorbing` bit is added to the observation (`1` for a terminal state, `0` for a regular state). The dataset is also augmented with terminal state to terminal state transitions so the agent can learn from those transitions. # # [1]:(https://arxiv.org/abs/1809.02925) # + id="Bu8IW4u3PA1S" def duplicate_terminal_step(episode): """Duplicates the terminal step if the episode ends in one. Noop otherwise.""" return rlds.transformations.concat_if_terminal( episode, make_extra_steps=tf.data.Dataset.from_tensors) def convert_to_absorbing_state(step): padding = step[rlds.IS_TERMINAL] if step[rlds.IS_TERMINAL]: step[rlds.OBSERVATION] = tf.zeros_like(step[rlds.OBSERVATION]) step[rlds.ACTION] = tf.zeros_like(step[rlds.ACTION]) # This is no longer a terminal state as the episode loops indefinitely. step[rlds.IS_TERMINAL] = False step[rlds.IS_LAST] = False # Add the absorbing bit to the observation. 
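    # (`padding` holds the original IS_TERMINAL flag, so the appended entry is 1 for
    # the absorbing state and 0 for every regular step, as described above.)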
step[rlds.OBSERVATION] = tf.concat([step[rlds.OBSERVATION], [padding]], 0) return step absorbing_state_ds = rlds.transformations.apply_nested_steps( dataset, duplicate_terminal_step) absorbing_state_ds = rlds.transformations.map_nested_steps( absorbing_state_ds, convert_to_absorbing_state) # + [markdown] id="MSmC3C1JPLyp" # # Offline Analysis # # One significant use case we envision for RLDS is the offline analysis of collected datasets. # There is no standard offline analysis procedure as what is possible is only limited by the imagination of the users. We expose in this section a fictitious use case to illustrate how custom tags stored in a RL dataset can be processed as part of an RLDS pipeline. # Let's assume we want to generate an histogram of the returns of the episodes present in the provided dataset of human episodes on the robosuite PickPlaceCan environment. This dataset holds episodes of fixed length of size 400 but also has a tag to indicate the actual end of the task. # We consider here the histogram of returns of the variable length episodes ending on the completion tag. # + id="eVol8u63PcEV" def placed_tag_is_set(step): return tf.not_equal(tf.math.count_nonzero(step['tag:placed']),0) def compute_return(steps): """Computes the return of the episode up to the 'placed' tag.""" # Truncate the episode after the placed tag. steps = rlds.transformations.truncate_after_condition( steps, truncate_condition=placed_tag_is_set) return rlds.transformations.sum_dataset(steps, lambda step: step[rlds.REWARD]) returns_ds = dataset.map(lambda episode: compute_return(episode[rlds.STEPS])) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="u0AFtWb3VzBy" # # Data Science Academy - Python Fundamentos - Capítulo 2 # # ## Download: http://github.com/dsacademybr # + id="k_LPU3tAVzB7" outputId="fc3383b6-aeba-47d7-e106-112edfa2ea9b" # Versão da Linguagem Python from platform import python_version print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version()) # + [markdown] id="ORq47ymSVzB-" # ## Variáveis e Operadores # + id="lKJpyrYXVzB-" # Atribuindo o valor 1 à variável var_teste var_teste = 1 # + colab={"base_uri": "https://localhost:8080/"} id="j9R8tVLZVzB_" outputId="b7240103-e0c2-4768-febb-5b46db489fde" # Imprimindo o valor da variável var_teste # + colab={"base_uri": "https://localhost:8080/"} id="VR-XehtvVzCA" outputId="8f3e0454-259b-4074-f595-73f936c2e16c" # Imprimindo o valor da variável print(var_teste) # + colab={"base_uri": "https://localhost:8080/", "height": 190} id="xfylCTAYVzCA" outputId="8845e467-557e-49e7-aaf4-4284f210514b" # Não podemos utilizar uma variável que não foi definida. Veja a mensagem de erro. 
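# Referencing the undefined name below raises: NameError: name 'my_var' is not defined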
my_var # + id="46cpFAlQVzCB" var_teste = 2 # + colab={"base_uri": "https://localhost:8080/"} id="0NAY5iMuVzCC" outputId="4539b033-7768-4e19-f274-efc5a4b8cae5" var_teste # + colab={"base_uri": "https://localhost:8080/"} id="8_zB9zJ-VzCD" outputId="04a4abab-35b6-4a07-ddc0-ccbc3fe7aeec" type(var_teste) # + id="kThEO9_EVzCE" var_teste = 9.5 # + id="I-OjN_-lVzCE" outputId="775ae098-be7c-49cd-9ae5-047c10df631f" type(var_teste) # + id="ouL35KWcVzCF" x = 1 # + id="XLzeXif7VzCF" outputId="8265453b-e97c-42c4-8bc5-a15912313185" x # + [markdown] id="X2X4PKBlVzCG" # ## Declaração Múltipla # + id="_Ze9eCIsVzCG" pessoa1, pessoa2, pessoa3 = "Maria", "José", "Tobias" # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="itxr5AddVzCG" outputId="0e32001b-cfb3-4874-8ffe-5f1b5a49a881" pessoa1 # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="_JGvPUXbVzCH" outputId="9d5b802c-385c-4557-8adc-f19f2086e9e6" pessoa2 # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="xgt_zEcNVzCH" outputId="665fddc9-8d60-4ae4-90c5-4de835c44991" pessoa3 # + id="y_GtmsJWVzCI" fruta1 = fruta2 = fruta3 = "Laranja" # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="WeImXiNIVzCI" outputId="242f5bde-6c3c-445b-967a-1c8b95efca8e" fruta1 # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="8wyzSfREVzCJ" outputId="1f9dd164-9805-4673-9d0c-0a0b35156b38" fruta2 # + colab={"base_uri": "https://localhost:8080/", "height": 208} id="RL8P7s90VzCJ" outputId="a159604a-2ea7-4028-e050-f58cfe19b690" # Fique atento!!! Python é case-sensitive. Criamos a variável fruta2, mas não a variável Fruta2. # Letras maiúsculas e minúsculas tem diferença no nome da variável. Fruta2 # + [markdown] id="NupiHtXgVzCJ" # ## Pode-se usar letras, números e underline (mas não se pode começar com números) # + id="qjsv5Ii4VzCK" x1 = 50 # + colab={"base_uri": "https://localhost:8080/"} id="CdBXn-c_VzCK" outputId="e010366c-5817-4d17-f871-ed5cbfa28797" x1 # + id="CSASZ6HEVzCK" outputId="a57e4567-75ed-48fb-cc84-7b83e6781bb7" # Mensagem de erro, pois o Python não permite nomes de variáveis que iniciem com números 1x = 50 # + [markdown] id="soUNjarFVzCK" # ## Não se pode usar palavras reservadas como nome de variável # # ## False # ## class # ## finally # ## is # ## return # ## None # ## continue # ## for # ## lambda # ## try # ## True # ## def # ## from # ## nonlocal # ## while # ## and # ## del # ## global # ## not # ## with # ## as # ## elif # ## if # ## or # ## yield # ## assert # ## else # ## import # ## pass # ## break # ## except # ## in # ## raise # + colab={"base_uri": "https://localhost:8080/", "height": 135} id="TbwQjEnlVzCL" outputId="9798a6b5-9d4f-4679-b4c1-d50a5cfba564" # Não podemos usar palavras reservadas como nome de variável break = 1 # + [markdown] id="v1bCRa3BVzCL" # ## Variáveis atribuídas a outras variáveis e ordem dos operadores # + id="bD3RY2v8VzCM" largura = 2 # + id="NPt0s0e1VzCM" altura = 4 # + id="RLCB9sVcVzCM" area = largura * altura # + colab={"base_uri": "https://localhost:8080/"} id="rNBzpebkVzCM" outputId="23dfa2e9-d7ea-485f-f8ee-636012af92ff" area # + id="60eimneUVzCM" perimetro = 2 * largura + 2 * altura # + colab={"base_uri": "https://localhost:8080/"} id="Lmy3iBrJVzCN" outputId="9aee1331-7806-4b44-93f3-76a97bfa2dbd" perimetro # + id="AhHwYenQVzCN" # A ordem dos operadores é a mesma seguida na Matemática perimetro = 2 * (largura + 2) * altura # + id="Otvc0p0OVzCN" outputId="7fa01035-e817-4244-de02-3b995644e178" perimetro # + [markdown] id="HmIhyo4DVzCO" # ## Operações 
com variáveis # + id="AOjCFHn9VzCO" idade1 = 25 # + id="wKJBlEK4VzCO" idade2 = 35 # + colab={"base_uri": "https://localhost:8080/"} id="lofpHCbCVzCO" outputId="ae68439d-86ed-4d9e-faca-4053a02cad82" idade1 + idade2 # + colab={"base_uri": "https://localhost:8080/"} id="9h25he9oVzCO" outputId="96982855-e8b3-4ddc-b23d-616c608639fe" idade2 - idade1 # + colab={"base_uri": "https://localhost:8080/"} id="kuYrkPTvVzCP" outputId="5fffd571-2543-4f1c-ad24-9810eaf3c3cd" idade2 * idade1 # + colab={"base_uri": "https://localhost:8080/"} id="TKyttuY5VzCP" outputId="2546f76b-275d-4f46-8f95-e8060ddfe8e9" idade2 / idade1 # + colab={"base_uri": "https://localhost:8080/"} id="Vw5oM61PVzCP" outputId="28370f41-5635-42de-e18e-c70156235f5f" idade2 % idade1 # + [markdown] id="E3LSkLOLVzCP" # ## Concatenação de Variáveis # + id="2gnvnf2FVzCP" nome = "Steve" # + id="6ETKurjPVzCQ" sobrenome = "Jobs" # + id="Uh9AVaiyVzCQ" fullName = nome + " " + sobrenome # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="tWFZxI6nVzCQ" outputId="f691cb4c-0d09-41b6-b653-526b33b93714" fullName # + [markdown] id="4KgoDJvGVzCQ" # # Fim # + [markdown] id="yAiZnO31VzCQ" # ### Obrigado # # ### Visite o Blog da Data Science Academy - Blog DSA # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd dt_links=pd.read_csv("links.csv",encoding="utf-8") dt_movies=pd.read_csv("movies.csv",encoding="utf-8") dt_tags=pd.read_csv("tags.csv",encoding="utf-8") dt_ratings=pd.read_csv("ratings.csv",encoding="utf-8") parsed_films=pd.read_csv("films_parsed.csv",encoding="utf-8") user_films=pd.read_csv("users_films.csv",encoding="utf-8") user_films.pop('userId') users_films=user_films.copy # ![vase](wr.png) # v количество отзывов # m минимальное количесчтво отзывов для чарта # R средний рейтинг фильм # C средний рейтинг по всем фильмам def getAverageRatings(users_films, if_user): """Funtion calculating average rating by user or for film Args: users_films (DataFrame): Table with users and films they rated/viewed. if_user (bool): this telling us calculate for user/film. Returns: dict: Avarege rating for each movie or user. 
""" ax = 1 if if_user else 0 #axis = 1 means rows and axis = 0 means columns sumOfRatings = users_films.sum(axis = ax).to_numpy() #this will give an array of sum of all the ratings of user if axis = 1 else #sum of all the ratings of movies if axis = 0 noOfRatings = (users_films!=0).sum(axis = ax).to_numpy() #this will give a boolean True or False array, and True means 1 and False #means 0, and further we are summing it to get the count of all the non-zero cells means length of non-zero cells rows, cols = users_films.shape averageRatings = {i: sumOfRatings[i]/noOfRatings[i] for i in range(rows if if_user else cols) if noOfRatings[i]!=0} return averageRatings avg_raring_per_movie = list(getAverageRatings(user_films, False).values()) def Average(lst): return sum(lst) / len(lst) C=Average(avg_raring_per_movie) dt_ratings_2= dt_ratings.groupby(['movieId']).count() dt_ratings_2=dt_ratings_2.loc[dt_ratings_2['userId'] > 1] m=dt_ratings_2['userId'].quantile(0.9) q_movies = dt_ratings_2.copy().loc[dt_ratings_2['userId'] >= m] q_movies avg_raring_per_movie = getAverageRatings(user_films, False) movies_merged=pd.merge(dt_movies, dt_ratings.groupby(['movieId']).count(), on='movieId', how='inner') movies_merged=movies_merged.loc[movies_merged['userId'] > 1] movies_merged['avg']=list(getAverageRatings(user_films, False).values()) len(list(getAverageRatings(user_films, False).values())) m=movies_merged['userId'].quantile(0.9) q_movies = movies_merged.copy().loc[movies_merged['userId'] >= m] def weighted_rating(x, m=m, C=C): v = x['userId'] R = x['avg'] # Calculation based on the IMDB formula return (v/(v+m) * R) + (m/(m+v) * C) q_movies['score'] = q_movies.apply(weighted_rating, axis=1) q_movies = q_movies.sort_values('score', ascending=False) q_movies q_movies.loc[q_movies['movieId']==7153].index[0] def find_no_viewed(x,k): if user_films[str(x)][k]==0: return x else: pass def recommend_for_user_k(k): recomend=[] for i in q_movies['movieId']: if user_films[str(i)][k-1]==0: recomend.append(q_movies.loc[q_movies['movieId']==i].index[0]) if len(recomend) == 10: return recomend recommend_for_user_k(1) getAverageRatings(user_films, True)[0] n=0 k=9 deleted=[] for i in range (n,k): dict_={} p=10 avg = getAverageRatings(user_films, True)[i] for j in movies_merged['movieId'].sample(frac = 1): if user_films[str(j)][i]>avg: dict_[(movies_merged[movies_merged['movieId']==j].index[0])]=user_films[str(j)][i] p=p-1 user_films[str(j)][i]=0 if p==0 : break deleted.append(dict_) for user in (1,10): count=0 recs=recommend_for_user_k(user) for rec in recs: if users_films[str(dt_movies['movieId'][rec])][user]>0: count=count+1 print(count/10) dt_movies['movieId'][6298] users_films[str(dt_movies['movieId'][6298])][1] list(dict(sorted(deleted[2].items(), key=lambda item: item[1])).keys()) ndcg_score([[5,4,2,1]],[[5,0,0,0]]) [[0,0,3,0]] recommend_for_user_k(1) users_films # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2020년 4월 13일 월요일 # ### Programmers - 스택 / 큐 : 다리를 지나는 트럭 # ### 문제 : https://programmers.co.kr/learn/courses/30/lessons/42583 # ### 블로그 : https://somjang.tistory.com/entry/Programmers-%EC%8A%A4%ED%83%9D%ED%81%90-%EB%8B%A4%EB%A6%AC%EB%A5%BC-%EC%A7%80%EB%82%98%EB%8A%94-%ED%8A%B8%EB%9F%AD-Python # ### 첫번째 시도 # + from collections import deque def solution(bridge_length, weight, truck_weights): answer = 0 truck_weights_deque = 
deque(truck_weights) trucks_on_bridge = [] while True: if len(truck_weights_deque) == 0: break if sum(trucks_on_bridge) < weight: trucks_on_bridge.append(truck_weights_deque.popleft()) if len(truck_weights_deque) == 0: answer = answer + bridge_length elif sum(trucks_on_bridge) >= weight: answer = answer + bridge_length trucks_on_bridge = [] answer = answer + len(truck_weights) return answer # - # --- # ### 두번째 시도 # + from collections import deque def solution(bridge_length, weight, truck_weights): answer = 1 truck_weights_deque = deque(truck_weights) trucks_on_bridge_deque = deque([0] * bridge_length) trucks_on_bridge_deque[-1] = truck_weights_deque.popleft() while truck_weights_deque: if sum(trucks_on_bridge_deque) - trucks_on_bridge_deque[0] + truck_weights_deque[0] > weight: trucks_on_bridge_deque.popleft() trucks_on_bridge_deque.append(0) answer = answer + 1 else: trucks_on_bridge_deque.popleft() trucks_on_bridge_deque.append(truck_weights_deque.popleft()) answer = answer + 1 answer = answer + bridge_length return answer # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Parameters FUDGE_FACTOR = 1.1200 # Multiply forecasts by this XGB_WEIGHT = 0.6200 BASELINE_WEIGHT = 0.0100 OLS_WEIGHT = 0.0620 NN_WEIGHT = 0.0800 XGB1_WEIGHT = 0.8000 # Weight of first in combination of two XGB models BASELINE_PRED = 0.0115 # Baseline based on mean of training data, per Oleg import numpy as np import pandas as pd import xgboost as xgb from sklearn.preprocessing import LabelEncoder import lightgbm as lgb import gc from sklearn.linear_model import LinearRegression import random import datetime as dt from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout, BatchNormalization from keras.layers.advanced_activations import PReLU from keras.layers.noise import GaussianDropout from keras.optimizers import Adam from keras.wrappers.scikit_learn import KerasRegressor from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import Imputer ##### READ IN RAW DATA print( "\nReading data from disk ...") prop = pd.read_csv('../Data/properties_2016.csv') train = pd.read_csv("../Data/train_2016_v2.csv") ################ ################ ## LightGBM ## ################ ################ # This section is (I think) originally derived from SIDHARTH's script: # https://www.kaggle.com/sidharthkumar/trying-lightgbm # which was forked and tuned by : # https://www.kaggle.com/yuqingxue/lightgbm-85-97 # and updated by me (): # https://www.kaggle.com/aharless/lightgbm-with-outliers-remaining # and a lot of additional changes have happened since then ##### PROCESS DATA FOR LIGHTGBM print( "\nProcessing data for LightGBM ..." 
) for c, dtype in zip(prop.columns, prop.dtypes): if dtype == np.float64: prop[c] = prop[c].astype(np.float32) df_train = train.merge(prop, how='left', on='parcelid') df_train.fillna(df_train.median(),inplace = True) x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode', 'fireplacecnt', 'fireplaceflag'], axis=1) #x_train['Ratio_1'] = x_train['taxvaluedollarcnt']/x_train['taxamount'] y_train = df_train['logerror'].values print(x_train.shape, y_train.shape) train_columns = x_train.columns for c in x_train.dtypes[x_train.dtypes == object].index.values: x_train[c] = (x_train[c] == True) del df_train; gc.collect() x_train = x_train.values.astype(np.float32, copy=False) d_train = lgb.Dataset(x_train, label=y_train) ##### RUN LIGHTGBM params = {} params['max_bin'] = 10 params['learning_rate'] = 0.0021 # shrinkage_rate params['boosting_type'] = 'gbdt' params['objective'] = 'regression' params['metric'] = 'l1' # or 'mae' params['sub_feature'] = 0.345 # feature_fraction (small values => use very different submodels) params['bagging_fraction'] = 0.85 # sub_row params['bagging_freq'] = 40 params['num_leaves'] = 512 # num_leaf params['min_data'] = 500 # min_data_in_leaf params['min_hessian'] = 0.05 # min_sum_hessian_in_leaf params['verbose'] = 0 params['feature_fraction_seed'] = 2 params['bagging_seed'] = 3 np.random.seed(0) random.seed(0) print("\nFitting LightGBM model ...") clf = lgb.train(params, d_train, 430) del d_train; gc.collect() del x_train; gc.collect() print("\nPrepare for LightGBM prediction ...") print(" Read sample file ...") sample = pd.read_csv('../Data/sample_submission.csv') print(" ...") sample['parcelid'] = sample['ParcelId'] print(" Merge with property data ...") df_test = sample.merge(prop, on='parcelid', how='left') print(" ...") del sample, prop; gc.collect() print(" ...") #df_test['Ratio_1'] = df_test['taxvaluedollarcnt']/df_test['taxamount'] x_test = df_test[train_columns] print(" ...") del df_test; gc.collect() print(" Preparing x_test...") for c in x_test.dtypes[x_test.dtypes == object].index.values: x_test[c] = (x_test[c] == True) print(" ...") x_test = x_test.values.astype(np.float32, copy=False) print("\nStart LightGBM prediction ...") p_test = clf.predict(x_test) del x_test; gc.collect() print( "\nUnadjusted LightGBM predictions:" ) print( pd.DataFrame(p_test).head() ) ################ ################ ## XGBoost ## ################ ################ # This section is (I think) originally derived from Infinite Wing's script: # https://www.kaggle.com/infinitewing/xgboost-without-outliers-lb-0-06463 # inspired by this thread: # https://www.kaggle.com/c/zillow-prize-1/discussion/33710 # but the code has gone through a lot of changes since then ##### RE-READ PROPERTIES FILE ##### (I tried keeping a copy, but the program crashed.) 
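# (Hedged aside, not part of the original script.) The LightGBM section above already
# downcasts float64 columns to float32 inline; wrapping the same idea in a small helper
# is one way a copy of the properties frame could be kept without the memory blow-up
# the comment above mentions. Names here are illustrative, not from the original code.
def downcast_float_columns(df):
    """Return a copy of df with float64 columns downcast to float32."""
    out = df.copy()
    for col in out.select_dtypes(include=['float64']).columns:
        out[col] = out[col].astype('float32')
    return out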
print( "\nRe-reading properties file ...") properties = pd.read_csv('../Data/properties_2016.csv') ##### PROCESS DATA FOR XGBOOST print( "\nProcessing data for XGBoost ...") for c in properties.columns: properties[c]=properties[c].fillna(-1) if properties[c].dtype == 'object': lbl = LabelEncoder() lbl.fit(list(properties[c].values)) properties[c] = lbl.transform(list(properties[c].values)) train_df = train.merge(properties, how='left', on='parcelid') x_train = train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1) x_test = properties.drop(['parcelid'], axis=1) # shape print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape)) # drop out ouliers train_df=train_df[ train_df.logerror > -0.4 ] train_df=train_df[ train_df.logerror < 0.419 ] x_train=train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1) y_train = train_df["logerror"].values.astype(np.float32) y_mean = np.mean(y_train) print('After removing outliers:') print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape)) ##### RUN XGBOOST print("\nSetting up data for XGBoost ...") # xgboost params xgb_params = { 'eta': 0.037, 'max_depth': 5, 'subsample': 0.80, 'objective': 'reg:linear', 'eval_metric': 'mae', 'lambda': 0.8, 'alpha': 0.4, 'base_score': y_mean, 'silent': 1 } dtrain = xgb.DMatrix(x_train, y_train) dtest = xgb.DMatrix(x_test) num_boost_rounds = 250 print("num_boost_rounds="+str(num_boost_rounds)) # train model print( "\nTraining XGBoost ...") model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds) print( "\nPredicting with XGBoost ...") xgb_pred1 = model.predict(dtest) print( "\nFirst XGBoost predictions:" ) print( pd.DataFrame(xgb_pred1).head() ) ##### RUN XGBOOST AGAIN print("\nSetting up data for XGBoost ...") # xgboost params xgb_params = { 'eta': 0.033, 'max_depth': 6, 'subsample': 0.80, 'objective': 'reg:linear', 'eval_metric': 'mae', 'base_score': y_mean, 'silent': 1 } num_boost_rounds = 150 print("num_boost_rounds="+str(num_boost_rounds)) print( "\nTraining XGBoost again ...") model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds) print( "\nPredicting with XGBoost again ...") xgb_pred2 = model.predict(dtest) print( "\nSecond XGBoost predictions:" ) print( pd.DataFrame(xgb_pred2).head() ) ##### COMBINE XGBOOST RESULTS xgb_pred = XGB1_WEIGHT*xgb_pred1 + (1-XGB1_WEIGHT)*xgb_pred2 #xgb_pred = xgb_pred1 print( "\nCombined XGBoost predictions:" ) print( pd.DataFrame(xgb_pred).head() ) del train_df del x_train del x_test del properties del dtest del dtrain del xgb_pred1 del xgb_pred2 gc.collect() ###################### ###################### ## Neural Network ## ###################### ###################### # Neural network copied from this script: # https://www.kaggle.com/aharless/keras-neural-network-lb-06492 (version 20) # which was built on the skeleton in this notebook: # https://www.kaggle.com/prasunmishra/ann-using-keras # Read in data for neural network print( "\n\nProcessing data for Neural Network ...") print('\nLoading train, prop and sample data...') train = pd.read_csv("../Data/train_2016_v2.csv", parse_dates=["transactiondate"]) prop = pd.read_csv('../Data/properties_2016.csv') sample = pd.read_csv('../Data/sample_submission.csv') print('Fitting Label Encoder on properties...') for c in prop.columns: prop[c]=prop[c].fillna(-1) if prop[c].dtype == 'object': lbl = LabelEncoder() lbl.fit(list(prop[c].values)) prop[c] = lbl.transform(list(prop[c].values)) print('Creating training set...') df_train 
= train.merge(prop, how='left', on='parcelid') df_train["transactiondate"] = pd.to_datetime(df_train["transactiondate"]) df_train["transactiondate_year"] = df_train["transactiondate"].dt.year df_train["transactiondate_month"] = df_train["transactiondate"].dt.month df_train['transactiondate_quarter'] = df_train['transactiondate'].dt.quarter df_train["transactiondate"] = df_train["transactiondate"].dt.day print('Filling NA/NaN values...' ) df_train.fillna(-1.0) print('Creating x_train and y_train from df_train...' ) x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode','fireplacecnt', 'fireplaceflag'], axis=1) y_train = df_train["logerror"] y_mean = np.mean(y_train) print(x_train.shape, y_train.shape) train_columns = x_train.columns for c in x_train.dtypes[x_train.dtypes == object].index.values: x_train[c] = (x_train[c] == True) print('Creating df_test...') sample['parcelid'] = sample['ParcelId'] print("Merging Sample with property data...") df_test = sample.merge(prop, on='parcelid', how='left') df_test["transactiondate"] = pd.to_datetime('2016-11-15') # placeholder value for preliminary version df_test["transactiondate_year"] = df_test["transactiondate"].dt.year df_test["transactiondate_month"] = df_test["transactiondate"].dt.month df_test['transactiondate_quarter'] = df_test['transactiondate'].dt.quarter df_test["transactiondate"] = df_test["transactiondate"].dt.day x_test = df_test[train_columns] print('Shape of x_test:', x_test.shape) print("Preparing x_test...") for c in x_test.dtypes[x_test.dtypes == object].index.values: x_test[c] = (x_test[c] == True) ## Preprocessing print("\nPreprocessing neural network data...") imputer= Imputer() imputer.fit(x_train.iloc[:, :]) x_train = imputer.transform(x_train.iloc[:, :]) imputer.fit(x_test.iloc[:, :]) x_test = imputer.transform(x_test.iloc[:, :]) sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) len_x=int(x_train.shape[1]) print("len_x is:",len_x) # Neural Network print("\nSetting up neural network model...") nn = Sequential() nn.add(Dense(units = 400 , kernel_initializer = 'normal', input_dim = len_x)) nn.add(PReLU()) nn.add(Dropout(.4)) nn.add(Dense(units = 160 , kernel_initializer = 'normal')) nn.add(PReLU()) nn.add(BatchNormalization()) nn.add(Dropout(.6)) nn.add(Dense(units = 64 , kernel_initializer = 'normal')) nn.add(PReLU()) nn.add(BatchNormalization()) nn.add(Dropout(.5)) nn.add(Dense(units = 26, kernel_initializer = 'normal')) nn.add(PReLU()) nn.add(BatchNormalization()) nn.add(Dropout(.6)) nn.add(Dense(1, kernel_initializer='normal')) nn.compile(loss='mae', optimizer=Adam(lr=4e-3, decay=1e-4)) print("\nFitting neural network model...") nn.fit(np.array(x_train), np.array(y_train), batch_size = 32, epochs = 70, verbose=2) print("\nPredicting with neural network model...") #print("x_test.shape:",x_test.shape) y_pred_ann = nn.predict(x_test) print( "\nPreparing results for write..." 
) nn_pred = y_pred_ann.flatten() print( "Type of nn_pred is ", type(nn_pred) ) print( "Shape of nn_pred is ", nn_pred.shape ) print( "\nNeural Network predictions:" ) print( pd.DataFrame(nn_pred).head() ) # Cleanup del train del prop del sample del x_train del x_test del df_train del df_test del y_pred_ann gc.collect() ################ ################ ## OLS ## ################ ################ # This section is derived from the1owl's notebook: # https://www.kaggle.com/the1owl/primer-for-the-zillow-pred-approach # which I () updated and made into a script: # https://www.kaggle.com/aharless/updated-script-version-of-the1owl-s-basic-ols np.random.seed(17) random.seed(17) print( "\n\nProcessing data for OLS ...") train = pd.read_csv("../Data/train_2016_v2.csv", parse_dates=["transactiondate"]) properties = pd.read_csv("../Data/properties_2016.csv") submission = pd.read_csv("../Data/sample_submission.csv") print(len(train),len(properties),len(submission)) def get_features(df): df["transactiondate"] = pd.to_datetime(df["transactiondate"]) df["transactiondate_year"] = df["transactiondate"].dt.year df["transactiondate_month"] = df["transactiondate"].dt.month df['transactiondate'] = df['transactiondate'].dt.quarter df = df.fillna(-1.0) return df def MAE(y, ypred): #logerror=log(Zestimate)−log(SalePrice) return np.sum([abs(y[i]-ypred[i]) for i in range(len(y))]) / len(y) train = pd.merge(train, properties, how='left', on='parcelid') y = train['logerror'].values test = pd.merge(submission, properties, how='left', left_on='ParcelId', right_on='parcelid') properties = [] #memory exc = [train.columns[c] for c in range(len(train.columns)) if train.dtypes[c] == 'O'] + ['logerror','parcelid'] col = [c for c in train.columns if c not in exc] train = get_features(train[col]) test['transactiondate'] = '2016-01-01' #should use the most common training date test = get_features(test[col]) print("\nFitting OLS...") reg = LinearRegression(n_jobs=-1) reg.fit(train, y); print('fit...') print(MAE(y, reg.predict(train))) train = []; y = [] #memory test_dates = ['2016-10-01','2016-11-01','2016-12-01','2017-10-01','2017-11-01','2017-12-01'] test_columns = ['201610','201611','201612','201710','201711','201712'] ######################## ######################## ## Combine and Save ## ######################## ######################## ##### COMBINE PREDICTIONS print( "\nCombining XGBoost, LightGBM, NN, and baseline predicitons ..." ) lgb_weight = 1 - XGB_WEIGHT - BASELINE_WEIGHT - NN_WEIGHT - OLS_WEIGHT lgb_weight0 = lgb_weight / (1 - OLS_WEIGHT) xgb_weight0 = XGB_WEIGHT / (1 - OLS_WEIGHT) baseline_weight0 = BASELINE_WEIGHT / (1 - OLS_WEIGHT) nn_weight0 = NN_WEIGHT / (1 - OLS_WEIGHT) pred0 = 0 pred0 += xgb_weight0*xgb_pred pred0 += baseline_weight0*BASELINE_PRED pred0 += lgb_weight0*p_test pred0 += nn_weight0*nn_pred print( "\nCombined XGB/LGB/NN/baseline predictions:" ) print( pd.DataFrame(pred0).head() ) print( "\nPredicting with OLS and combining with XGB/LGB/NN/baseline predicitons: ..." ) for i in range(len(test_dates)): test['transactiondate'] = test_dates[i] pred = FUDGE_FACTOR * ( OLS_WEIGHT*reg.predict(get_features(test)) + (1-OLS_WEIGHT)*pred0 ) submission[test_columns[i]] = [float(format(x, '.4f')) for x in pred] print('predict...', i) print( "\nCombined XGB/LGB/NN/baseline/OLS predictions:" ) print( submission.head() ) ##### WRITE THE RESULTS from datetime import datetime print( "\nWriting results to disk ..." 
) submission.to_csv('sub{}.csv'.format(datetime.now().strftime('%Y%m%d_%H%M%S')), index=False) print( "\nFinished ...") # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="wniawHP7h34E" # # Google Colab (GC) - Numba GPU # - # Código fonte: gc_nb_gpu.py # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 222, "status": "ok", "timestamp": 1632527588661, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzd7UO2EoPv4GHJ2AiwebDPC3evXCqxNZAD2FZ=s64", "userId": "04933934069991532991"}, "user_tz": 180} id="FisGDvbyOQ08" outputId="18348a80-333c-467b-afbe-ae2fa8dff470" # ! nvidia-smi -L # - # ### roda pela primeira vez: from gc_nb_gpu import stencil result = stencil(2400) # ### roda pela segunda vez (usa o que está no cache): from gc_nb_gpu import stencil result = stencil(2400) # ### visualiza o resultado # %matplotlib inline import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) plt.imshow(result[600:2200,600:2200], cmap='jet') import numpy as np a = result[600:2200,600:2200] t = np.arange(a.shape[0]) x, y = np.meshgrid(t, t) ax = plt.figure(figsize=(8, 8)).gca(projection='3d') ax.plot_surface(x, y, a, cmap='jet', rcount=200, ccount=200) plt.show() # --- # # Usando uma grade com 8 x 8 pontos # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2460, "status": "ok", "timestamp": 1632515999169, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzd7UO2EoPv4GHJ2AiwebDPC3evXCqxNZAD2FZ=s64", "userId": "04933934069991532991"}, "user_tz": 180} id="3-JbWmKch34N" outputId="9223ea1d-fa76-47ce-8e5f-a800402d6c96" from gc_nb_gpu import stencil result = stencil(8) # - # %matplotlib inline import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) plt.imshow(result, cmap='jet') t = np.arange(result.shape[0]) x, y = np.meshgrid(t, t) ax = plt.figure(figsize=(8, 8)).gca(projection='3d') ax.plot_surface(x, y, result, cmap='jet') plt.show() # --- # # Grade 24 x 24 # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2460, "status": "ok", "timestamp": 1632515999169, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzd7UO2EoPv4GHJ2AiwebDPC3evXCqxNZAD2FZ=s64", "userId": "04933934069991532991"}, "user_tz": 180} id="3-JbWmKch34N" outputId="9223ea1d-fa76-47ce-8e5f-a800402d6c96" from gc_nb_gpu import stencil result = stencil(24) # - # %matplotlib inline import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) plt.imshow(result, cmap='jet') t = np.arange(result.shape[0]) x, y = np.meshgrid(t, t) ax = plt.figure(figsize=(8, 8)).gca(projection='3d') ax.plot_surface(x, y, result, cmap='jet') plt.show() # --- # # Grade 240 x 240 # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2460, "status": "ok", "timestamp": 1632515999169, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzd7UO2EoPv4GHJ2AiwebDPC3evXCqxNZAD2FZ=s64", "userId": "04933934069991532991"}, "user_tz": 180} id="3-JbWmKch34N" outputId="9223ea1d-fa76-47ce-8e5f-a800402d6c96" from gc_nb_gpu import stencil result = stencil(240) # - # %matplotlib inline import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) plt.imshow(result, cmap='jet') t = np.arange(result.shape[0]) x, y = np.meshgrid(t, t) ax = plt.figure(figsize=(8, 8)).gca(projection='3d') 
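# (Aside, not part of the original notebook: recent matplotlib releases deprecate
#  passing projection to Figure.gca(); if this cell warns or fails, the equivalent
#  call is ax = plt.figure(figsize=(8, 8)).add_subplot(projection='3d').)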
ax.plot_surface(x, y, result, cmap='jet') plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: widgets-tutorial # language: python # name: widgets-tutorial # --- # # Layout and Styling of Jupyter Widgets # # This section of the tutorial describes # # + ## [Layout and Styling of Jupyter Widgets](03.01-widget-layout-and-styling.ipynb) # How to lay out and style Jupyter interactive widgets to build rich and *reactive* widget-based applications. # # + ## [Container Layout Widgets](03.01-widget-layout-and-styling.ipynb#Container-Layout-Widgets) # Some high-level container widgets for laying out widget-based applications. # # + ## [Widget Label Styling (*Optional*)](03.02-OPTIONAL-widget-label-styling.ipynb) # Layout and styling of widget labels (please read this on your own). # # + ## [Predefined Widget Styles (*Optional*)](03.03-OPTIONAL-widget-specific-styling.ipynb) # Styling available for some individual widgets (please read this on your own). # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # First, install Firrtl by following their [installation instructions](https://github.com/freechipsproject/firrtl#installation-instructions) # # Be sure to add the directory containing the firrtl command line tool (typically `firrtl/utils/bin`) to your `$PATH` # # # The FIRRTL backend for magma is experimental and woefully lacking in support for standard mantle circuits. The core functionality has been implemented to demonstrate the capability of compiling magma circuits to FIRRTL. Pull requests that expand support for the FIRRTL are welcome. 
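# *Aside (not in the original document)*: before running the cells below, it can help to confirm that the `firrtl` command line tool is actually visible on your `$PATH`. The line below is an optional, minimal sanity check; the suggested fix in the message assumes the typical `firrtl/utils/bin` layout mentioned above.

# !which firrtl || echo "firrtl not found on PATH; add firrtl/utils/bin to PATH"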
# + import magma as m m.set_mantle_target("coreir") import mantle main = m.DefineCircuit('main', "a", m.In(m.Bit), "b", m.In(m.Bit), "c", m.In(m.Bit), "d", m.Out(m.Bit)) d = (main.a & main.b) ^ main.c m.wire(d, main.d) m.compile("build/main", main, output="firrtl") with open("build/main.fir", "r") as f: print(f.read()) # - # *Note*: the `!` syntax used in the next cell is jupyter notebook syntax sugar for executing a shell command # !firrtl -i build/main.fir -o build/main.v -X verilog with open("build/main.v", "r") as f: print(f.read()) with open("build/sim_main.cpp", "w") as sim_main_f: sim_main_f.write(""" #include "Vmain.h" #include "verilated.h" #include #include int main(int argc, char **argv, char **env) { Verilated::commandArgs(argc, argv); Vmain* top = new Vmain; int tests[8][4] = { {0, 0, 0, 0}, {1, 0, 0, 0}, {0, 1, 0, 0}, {1, 1, 0, 1}, {0, 0, 1, 1}, {1, 0, 1, 1}, {0, 1, 1, 1}, {1, 1, 1, 0}, }; for(int i = 0; i < 8; i++) { int* test = tests[i]; int a = test[0]; int b = test[1]; int c = test[2]; int d = test[3]; top->a = a; top->b = b; top->c = c; top->eval(); assert(top->d == d); } delete top; std::cout << "Success" << std::endl; exit(0); } """) # *Note*: The `%%bash` statement is a jupyter notebook magic operator that treats the cell as a bash script # + language="bash" # cd build # verilator -Wall -Wno-DECLFILENAME --cc main.v --exe sim_main.cpp # make -C obj_dir -j -f Vmain.mk Vmain # ./obj_dir/Vmain # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import logging from gensim.models import Word2Vec from gensim.models.word2vec import LineSentence import os logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) class InputSentences(object): def __init__(self, dirname): self.dirname = dirname def __iter__(self): for fname in os.listdir(self.dirname): for line in open(os.path.join(self.dirname, fname)): yield line.split() # this is a very simple, use-case sentences = [['Rome', 'Italy'], ['Beijing', 'China']] # train word2vec on the two sentences model = Word2Vec(sentences, min_count=1) model.most_similar(positive=['Rome'], topn=1) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="V58rxea0HqSa" colab={"base_uri": "https://localhost:8080/"} outputId="a9b62101-4f2e-4fc6-86ae-167b5754294d" import os # Find the latest version of spark 3.0 from http://www.apache.org/dist/spark/ and enter as the spark version # For example: # spark_version = 'spark-3.0.3' spark_version = 'spark-3.0.3' os.environ['SPARK_VERSION']=spark_version # Install Spark and Java # !apt-get update # !apt-get install openjdk-11-jdk-headless -qq > /dev/null # !wget -q http://www.apache.org/dist/spark/$SPARK_VERSION/$SPARK_VERSION-bin-hadoop2.7.tgz # !tar xf $SPARK_VERSION-bin-hadoop2.7.tgz # !pip install -q findspark # Set Environment Variables import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-11-openjdk-amd64" os.environ["SPARK_HOME"] = f"/content/{spark_version}-bin-hadoop2.7" # Start a SparkSession import findspark findspark.init() # + id="_xKwTpATHqSe" colab={"base_uri": "https://localhost:8080/"} outputId="5ab64a10-f498-438c-a341-ff5fc079e004" # Download the Postgres driver that will allow Spark to interact with Postgres. 
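# (Aside, not part of the original notebook: the jar fetched below is the same file the
#  SparkSession config later references via spark.driver.extraClassPath, so the
#  downloaded file name must match that configured path.)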
# !wget https://jdbc.postgresql.org/download/postgresql-42.2.16.jar # + id="MMqDAjVS0KN9" from pyspark.sql import SparkSession spark = SparkSession.builder.appName("M16-Amazon-Challenge").config("spark.driver.extraClassPath","/content/postgresql-42.2.16.jar").getOrCreate() # + [markdown] id="cyBsySGuY-9V" # ### Load Amazon Data into Spark DataFrame # + id="CtCmBhQJY-9Z" colab={"base_uri": "https://localhost:8080/"} outputId="15da4527-c3a2-451f-b3c3-28b1ad88b7f8" from pyspark import SparkFiles url = "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Software_v1_00.tsv.gz" spark.sparkContext.addFile(url) df = spark.read.option("encoding", "UTF-8").csv(SparkFiles.get(""), sep="\t", header=True, inferSchema=True) df.show() # + [markdown] id="2yUSe55VY-9t" # ### Create DataFrames to match tables # + id="C8REmY1aY-9u" from pyspark.sql.functions import to_date # Read in the Review dataset as a DataFrame # + id="B0TESUDRY-90" colab={"base_uri": "https://localhost:8080/"} outputId="28675d19-d8c9-44df-d6be-1f5cd5c5f1e9" # Create the customers_table DataFrame customers_df = df.groupby("customer_id").count().withColumnRenamed("count", "customer_count") customers_df.show() # + id="4FwXA6UvY-96" colab={"base_uri": "https://localhost:8080/"} outputId="4d349584-bede-424f-eef9-002960e7f5a3" # Create the products_table DataFrame and drop duplicates. products_df = df.select(["product_id","product_title"]).drop_duplicates() products_df.show() # + id="MkqyCuNQY-9-" colab={"base_uri": "https://localhost:8080/"} outputId="d80103ca-3d17-4d9e-f1c1-a37b12359f44" # Create the review_id_table DataFrame. # Convert the 'review_date' column to a date datatype with to_date("review_date", 'yyyy-MM-dd').alias("review_date") review_id_df = df.select(["review_id","customer_id","product_id","product_parent", to_date("review_date", 'yyyy-MM-dd').alias("review_date")]) review_id_df.show() # + id="lzMmkdKmY--D" colab={"base_uri": "https://localhost:8080/"} outputId="950a299f-56ed-434a-9def-93b27cb6e9d6" # Create the vine_table. 
DataFrame vine_df = df.select(["review_id","star_rating","helpful_votes","total_votes","vine","verified_purchase"]) vine_df.show() # + id="7jiUvs1aY--L" from pyspark.sql.functions import col # + id="T2zgZ-aKY--Q" colab={"base_uri": "https://localhost:8080/"} outputId="6dc964a4-c7dc-4351-db27-6abb30c91825" filtered_df = df.filter(col("total_votes") >=10 ) filtered_df.show() # + id="1m3yzn-LY--U" colab={"base_uri": "https://localhost:8080/"} outputId="ada5da7c-7b95-4515-9bc1-6d184f0f34a8" vote_ratio_df = filtered_df.filter(col("helpful_votes")/col("total_votes")>=0.5) vote_ratio_df.show() # + id="KbXri15fY--Z" colab={"base_uri": "https://localhost:8080/"} outputId="5e42636e-24b5-4302-ae98-327e58f656ad" paid_review_df = vote_ratio_df.filter(col("vine") == "Y") paid_review_df.show() # + id="XdQknSHLY--e" colab={"base_uri": "https://localhost:8080/"} outputId="d937a98e-065c-48bb-e435-829c53b95d5f" unpaid_review_df = vote_ratio_df.filter(col("vine") == "N") unpaid_review_df.show() # + id="Exuo6ebUsCqW" colab={"base_uri": "https://localhost:8080/"} outputId="49342143-40e6-4c41-ce3a-7b565a00f103" # PAID # total reviews: total_paid_reviews = paid_review_df.count() print(total_paid_reviews) # + colab={"base_uri": "https://localhost:8080/"} id="q4a_4tyb6etb" outputId="8ded3b4a-de29-4665-eceb-1ed7cfe6e975" # 5 star reviews: paid_five_star_reviews = paid_review_df.filter(col("star_rating")==5).count() print(paid_five_star_reviews) # + colab={"base_uri": "https://localhost:8080/"} id="o3hDLlOd9btf" outputId="c8554863-b26c-4d62-bed3-79b2bc7c742e" # % 5 star reviews paid_ratio = paid_five_star_reviews/total_paid_reviews print(paid_ratio) # + colab={"base_uri": "https://localhost:8080/"} id="A7ahCnlr9dHF" outputId="bf102078-50f3-4bdf-a6ca-6bcc8b52435f" # UNPAID # total reviews: total_unpaid_reviews = unpaid_review_df.count() print(total_unpaid_reviews) # + colab={"base_uri": "https://localhost:8080/"} id="2JS_ahhB9ev9" outputId="7864a4b5-64ac-443b-ae83-5196ab68c705" # 5 star reviews: unpaid_five_star_reviews = unpaid_review_df.filter(col("star_rating")==5).count() print(unpaid_five_star_reviews) # + id="rwPtdrEc9gEd" # % 5 star reviews unpaid_ratio = unpaid_five_star_reviews/total_unpaid_reviews print(unpaid_ratio) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="Vsxh0ki7SuNL" outputId="32fa164b-e0ac-41a2-d958-20206bdd3df2" # !pip install catboost # !pip install scikit-learn # !pip install ipywidgets # + colab={"base_uri": "https://localhost:8080/"} id="R_JXpRWnS610" outputId="f130f866-15bb-4a51-e05a-7a6b325873d2" from google.colab import drive drive.mount('/content/drive') # + id="F0Up4UaoTANS" train_csv_link = '/content/drive/MyDrive/nsu-abmd-2021-e-commerce/train.csv' # + id="OND6Nq0Zs6Ul" test_csv_link = '/content/drive/MyDrive/nsu-abmd-2021-e-commerce/test.csv' # + id="cg6DxcwC9Rbg" submission_csv_link = '/content/drive/MyDrive/nsu-abmd-2021-e-commerce/submission_example.csv' # + id="yDWIyJGSX5kx" import pandas as pd import numpy as np # + colab={"base_uri": "https://localhost:8080/"} id="PZ23Ast3E7dJ" outputId="f57481f5-9a13-433d-8e63-fc43d42910f3" train = pd.read_csv(train_csv_link) test = pd.read_csv(test_csv_link) # + id="QHfd8Gh_XESw" test = pd.read_csv(test_csv_link) # + id="HDocpl_uE9DK" subs = pd.read_csv(submission_csv_link) # + colab={"base_uri": "https://localhost:8080/"} 
id="HXgNKmkuGiOc" outputId="7cf8fe34-1c7d-4c01-f7d7-4538b2f5eab8" # !pip install cloudpickle==1.2.0 # + colab={"base_uri": "https://localhost:8080/"} id="FE2LefXMGwji" outputId="61ac5c1f-743c-45bd-f35c-22a16e2a3c56" # !pip install folium==0.2.1 # + colab={"base_uri": "https://localhost:8080/"} id="zUTp95A8G9y8" outputId="c4da0d1b-ace5-410e-d812-e0af594d3537" # !pip install imgaug==0.2.5 # + colab={"base_uri": "https://localhost:8080/"} id="BMkhMJ1vEI5N" outputId="bb26811c-0a1a-45ff-f2bb-e9ef70be49a2" # !pip install autogluon.tabular[all] # + id="HkEReOaFEmHa" RUNTIME = 3228 y = "Sales" # + id="WzMBm5AJHpAP" from autogluon.tabular import TabularPredictor # + colab={"base_uri": "https://localhost:8080/"} id="xdinYFTWbiKq" outputId="a65ce285-af4d-4fb0-b9cc-7c2dcf5c6283" train.info() # + colab={"base_uri": "https://localhost:8080/"} id="RJ8z5NE4Eu3D" outputId="504fe88c-33f2-4387-ae98-3df763ce987b" predictor = TabularPredictor(label=y, eval_metric='mse', path='/content/drive/MyDrive/nsu-abmd-2021-e-commerce/agModels').fit(train, presets='best_quality', time_limit=RUNTIME) # + colab={"base_uri": "https://localhost:8080/"} id="BtgIfZamV3mu" outputId="b9ce7f7a-acf7-4f07-971f-8feb8b68aa23" test.info() # + id="QJX1tqI6Ew92" preds = predictor.predict(test) # + id="D18Kk7WzEyr3" subs['Expected'] = pd.DataFrame(preds)['Sales'] # + colab={"base_uri": "https://localhost:8080/"} id="7yoo54GCX2S_" outputId="7a198a3f-c448-494e-fc7f-143067b8c3db" subs.info() # + id="H2YTi_tgE19x" subs.to_csv('/content/drive/MyDrive/nsu-abmd-2021-e-commerce/submission.csv',index = False) # + [markdown] id="LBtDYMnn9pOu" # Загрузка обученной модели # + id="qw8dNowv9crR" predictor = TabularPredictor.load('/content/drive/MyDrive/nsu-abmd-2021-e-commerce/agModels') # + id="QLpi-DnG9oAG" preds = predictor.predict(test) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="ndaf2je-jTzv" import pandas as pd import numpy as np np.random.seed(5151) from keras.models import Sequential from keras.layers import LSTM, TimeDistributed, GRU, Activation, Conv2D,AveragePooling2D,Flatten, TimeDistributed,concatenate, Input,BatchNormalization from keras.layers import Dense from keras.layers import Add from keras.models import Model from keras import regularizers from sklearn.preprocessing import MinMaxScaler from sklearn.feature_selection import RFE from matplotlib import pyplot as plt # + colab={"base_uri": "https://localhost:8080/", "height": 212} colab_type="code" id="TkrMZwOpjT0M" outputId="3b6ddcb3-9de6-4d6c-e83a-b7f3914c8536" url='https://raw.githubusercontent.com/NayantaraPrem/EthereumPricePrediction/master/final_dataset.csv' df = pd.read_csv(url) df.head(4) # + colab={"base_uri": "https://localhost:8080/", "height": 55} colab_type="code" id="LdsllcaZjT0o" outputId="b50c833a-6ef1-4018-ba47-00cd9e6b359a" output_column = 'Price' output_column_feature = 'Price' X = df.drop(['Date(UTC)', 'UnixTimeStamp'], axis=1) #drop rows with NaN values X = X.fillna(0) features =['AddressCount','MarketCap'] from_cols = [col for col in X.columns if 'from' in col] to_cols = [col for col in X.columns if 'to' in col] print(len(to_cols)) print(len(from_cols)) X['Price']=X['Price'].diff() X['AddressCount']=X['AddressCount'].diff() X['MarketCap']=X['MarketCap'].diff() X=X.dropna() n_timesteps = 7 n_features = len(features) n_outputs = 1 # no. 
of days to predict N = len(X) print(features) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="B5cPw_jpjT1B" outputId="17382e63-48a4-4a82-f002-b57b86e31bbf" train_split = 0.8 #20% test, 80% train X_train = X[0:round(N*train_split)] X_test = X[round(N*train_split):] N_test = X_test.shape[0] N_train = X_train.shape[0] len(X_train) # + colab={"base_uri": "https://localhost:8080/", "height": 243} colab_type="code" id="UxkXOJtTjT1V" outputId="5f87188f-3de0-498f-d83a-4662002c99e2" #scale training data # input = 3 features. Column 4 shifted is output input_scaler = MinMaxScaler(feature_range = (0, 1)) X_train_scaled = pd.DataFrame(input_scaler.fit_transform(X_train), columns=X_train.columns) output_scaler = MinMaxScaler(feature_range = (0, 1)) output_scaler.fit(np.array(X_train[output_column]).reshape((N_train,1))) # scaling the output column X_train_scaled.head() # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="pDsUWJ8hjT1r" outputId="e48c31ef-cb2c-4039-8f85-d42a367e43d6" #input for other features X_train_windows = [] y_train_windows = [] x_cnn = np.zeros((n_timesteps,4,15)) for i in range(N_train - n_timesteps): for j in range(n_timesteps): for k in range(15): x_cnn[j,0,k] = X_train_scaled.loc[i+j, to_cols[k*2]] x_cnn[j,1,k] = X_train_scaled.loc[i+j, to_cols[k*2+1]] if (k<2): x_cnn[j,2,k] = X_train_scaled.loc[i+j, from_cols[k*2]] x_cnn[j,3,k] = X_train_scaled.loc[i+j, from_cols[k*2+1]] if (k==2): x_cnn[j,2,k] = 0 x_cnn[j,3,k] = 0 if (k>2): x_cnn[j,2,k] = X_train_scaled.loc[i+j, from_cols[(k-1)*2]] x_cnn[j,3,k] = X_train_scaled.loc[i+j, from_cols[(k-1)*2+1]] X_train_windows.append(np.array(x_cnn)) y_train_windows.append(np.array(X_train_scaled.loc[i+n_timesteps, X_train_scaled.columns == output_column])) X_train_windows = np.array(X_train_windows) X_train_windows=np.reshape(X_train_windows,(N_train - n_timesteps,n_timesteps,4,15,1)) y_train_windows = np.array(y_train_windows) X_train_windows.shape, y_train_windows.shape # + #input for CNNs X_train_windows2 = [] for i in range(N_train - n_timesteps): X_train_windows2.append(np.array(X_train_scaled.loc[i:i+n_timesteps-1, features])) X_train_windows2 = np.array(X_train_windows2) X_train_windows2.shape # + colab={} colab_type="code" id="LwAfqGj2jT18" #define the model input1 = Input(shape = (n_timesteps,4,15,1)) model=(TimeDistributed(Conv2D(16, (1, 15), activation="relu")))(input1) model=(TimeDistributed(Conv2D(16, (4, 1), activation="relu")))(model) model=(TimeDistributed(BatchNormalization(axis = -1)))(model) model=(TimeDistributed(Flatten()))(model) input2 = Input(shape = (n_timesteps,len(features) )) model_merge= concatenate([model, input2]) model_merge=LSTM( 10, activation='relu', return_sequences=True)(model_merge) model_merge=LSTM(10, activation='relu')(model_merge) model_merge=(Activation('softmax'))(model_merge) output=(Dense(1 ,activation='sigmoid'))(model_merge) final_model = Model(inputs=[input1, input2], outputs=[output]) final_model.compile(optimizer='adam', loss='mse') # train model final_model.fit([X_train_windows, X_train_windows2], y_train_windows, epochs=200, verbose=0) # + colab={} colab_type="code" id="wvxkdZmujT2J" # test prediction #scale the input to [0,1] X_test_scaled = pd.DataFrame(input_scaler.fit_transform(X_test), columns=X_test.columns) #input for CNN X_test_windows = [] y_test_windows = [] x_cnn = np.zeros((n_timesteps,4,15)) for i in range(N_test - n_timesteps): for j in range(n_timesteps): for k in range(15): x_cnn[j,0,k] = 
X_test_scaled.loc[i+j, to_cols[k*2]] x_cnn[j,1,k] = X_test_scaled.loc[i+j, to_cols[k*2+1]] if (k<2): x_cnn[j,2,k] = X_test_scaled.loc[i+j, from_cols[k*2]] x_cnn[j,3,k] = X_test_scaled.loc[i+j, from_cols[k*2+1]] if (k==2): x_cnn[j,2,k] = 0 x_cnn[j,3,k] = 0 if (k>2): x_cnn[j,2,k] = X_test_scaled.loc[i+j, from_cols[(k-1)*2]] x_cnn[j,3,k] = X_test_scaled.loc[i+j, from_cols[(k-1)*2+1]] X_test_windows.append(np.array(x_cnn)) y_test_windows.append(np.array(X_test_scaled.loc[i+n_timesteps, X_test_scaled.columns == output_column])) X_test_windows = np.array(X_test_windows) X_test_windows=np.reshape(X_test_windows,(N_test - n_timesteps,n_timesteps,4,15,1)) y_test_windows = np.array(y_test_windows) X_test_windows.shape, y_test_windows.shape # + #input for other features X_test_windows2 = [] for i in range(N_test - n_timesteps): X_test_windows2.append(np.array(X_train_scaled.loc[i:i+n_timesteps-1, features])) X_test_windows2 = np.array(X_test_windows2) X_test_windows2.shape # + #run predictions on each window y_pred = [] y_pred = final_model.predict([X_test_windows,X_test_windows2]) # rescale the predictions back to the original values y_pred_scaled = output_scaler.inverse_transform(y_pred) y_actual_scaled = output_scaler.inverse_transform(y_test_windows) # + colab={} colab_type="code" id="wEC5So1wjT2z" plt.plot(y_pred_scaled, color='red', label='Predicted Prices') plt.plot(y_actual_scaled, color='green', label='Actual Prices') plt.legend() plt.show() # + colab={} colab_type="code" id="2Wjs3h9YjT3B" def make_df(y_pred,y_true): y_pred.name = 'y_pred' y_true.name = 'y_true' df = pd.concat([y_pred,y_true],axis=1) df['sign_pred'] = df.y_pred.apply(np.sign) df['sign_true'] = df.y_true.apply(np.sign) df['is_correct'] = 0 df.loc[df.sign_pred * df.sign_true > 0 ,'is_correct'] = 1 # only registers 1 when prediction was made AND it was correct df['is_incorrect'] = 0 df.loc[df.sign_pred * df.sign_true < 0,'is_incorrect'] = 1 # only registers 1 when prediction was made AND it was wrong df['is_predicted'] = df.is_correct + df.is_incorrect df['result'] = df.sign_pred * df.y_true return df def calc_scorecard(df): scorecard = pd.Series() # building block metrics scorecard.loc['accuracy'] = df.is_correct.sum()*1. 
/ (df.is_predicted.sum()*1.)*100 scorecard.loc['edge'] = df.result.mean() scorecard.loc['noise'] = df.y_pred.diff().abs().mean() # derived metrics scorecard.loc['y_true_chg'] = df.y_true.abs().mean() scorecard.loc['y_pred_chg'] = df.y_pred.abs().mean() scorecard.loc['prediction_calibration'] = scorecard.loc['y_pred_chg']/scorecard.loc['y_true_chg'] scorecard.loc['capture_ratio'] = scorecard.loc['edge']/scorecard.loc['y_true_chg']*100 # metrics for a subset of predictions scorecard.loc['edge_long'] = df[df.sign_pred == 1].result.mean() - df.y_true.mean() scorecard.loc['edge_short'] = df[df.sign_pred == -1].result.mean() - df.y_true.mean() scorecard.loc['edge_win'] = df[df.is_correct == 1].result.mean() - df.y_true.mean() scorecard.loc['edge_lose'] = df[df.is_incorrect == 1].result.mean() - df.y_true.mean() return scorecard y_pred_scaled = y_pred_scaled.reshape(y_pred_scaled.shape[0]) y_actual_scaled = y_actual_scaled.reshape(y_actual_scaled.shape[0]) print(y_pred_scaled.shape) df = make_df(pd.Series(y_pred_scaled-0.5),pd.Series(y_actual_scaled-0.5)) print(calc_scorecard(df)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import pandas as pd import numpy as np from datetime import datetime import matplotlib.pyplot as plt from sklearn.model_selection import KFold from sklearn.feature_selection import RFECV import lightgbm as lgb from tqdm import tqdm_notebook from sklearn.metrics import roc_auc_score from sklearn.preprocessing import LabelEncoder import multiprocessing from sklearn.model_selection import StratifiedKFold,TimeSeriesSplit import gc # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" def sd(col, max_loss_limit=0.001, avg_loss_limit=0.001, na_loss_limit=0, n_uniq_loss_limit=0, fillna=0): """ max_loss_limit - don't allow any float to lose precision more than this value. Any values are ok for GBT algorithms as long as you don't unique values. See https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_decimal_values_in_[0,_1] avg_loss_limit - same but calculates avg throughout the series. na_loss_limit - not really useful. n_uniq_loss_limit - very important parameter. If you have a float field with very high cardinality you can set this value to something like n_records * 0.01 in order to allow some field relaxing. 
""" is_float = str(col.dtypes)[:5] == 'float' na_count = col.isna().sum() n_uniq = col.nunique(dropna=False) try_types = ['float16', 'float32'] if na_count <= na_loss_limit: try_types = ['int8', 'int16', 'float16', 'int32', 'float32'] for type in try_types: col_tmp = col # float to int conversion => try to round to minimize casting error if is_float and (str(type)[:3] == 'int'): col_tmp = col_tmp.copy().fillna(fillna).round() col_tmp = col_tmp.astype(type) max_loss = (col_tmp - col).abs().max() avg_loss = (col_tmp - col).abs().mean() na_loss = np.abs(na_count - col_tmp.isna().sum()) n_uniq_loss = np.abs(n_uniq - col_tmp.nunique(dropna=False)) if max_loss <= max_loss_limit and avg_loss <= avg_loss_limit and na_loss <= na_loss_limit and n_uniq_loss <= n_uniq_loss_limit: return col_tmp # field can't be converted return col def reduce_mem_usage_sd(df, deep=True, verbose=False, obj_to_cat=False): numerics = ['int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2 for col in tqdm_notebook(df.columns): col_type = df[col].dtypes # collect stats na_count = df[col].isna().sum() n_uniq = df[col].nunique(dropna=False) # numerics if col_type in numerics: df[col] = sd(df[col]) # strings if (col_type == 'object') and obj_to_cat: df[col] = df[col].astype('category') if verbose: print(f'Column {col}: {col_type} -> {df[col].dtypes}, na_count={na_count}, n_uniq={n_uniq}') new_na_count = df[col].isna().sum() if (na_count != new_na_count): print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost na values. Before: {na_count}, after: {new_na_count}') new_n_uniq = df[col].nunique(dropna=False) if (n_uniq != new_n_uniq): print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost unique values. Before: {n_uniq}, after: {new_n_uniq}') end_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2 percent = 100 * (start_mem - end_mem) / start_mem print('Mem. 
usage decreased from {:5.2f} Mb to {:5.2f} Mb ({:.1f}% reduction)'.format(start_mem, end_mem, percent)) return df # + path='../input/ieee-fraud-detection/' print('Train readed\n') train_identity = pd.read_csv(f'{path}train_identity.csv') train_transaction = pd.read_csv(f'{path}train_transaction.csv') print('Test readed\n') test_identity = pd.read_csv(f'{path}test_identity.csv') test_transaction = pd.read_csv(f'{path}test_transaction.csv') print('Merging\n') train = pd.merge(train_transaction, train_identity, on='TransactionID', how='left') test = pd.merge(test_transaction, test_identity, on='TransactionID', how='left') del train_identity,train_transaction,test_identity,test_transaction gc.collect() train = reduce_mem_usage_sd(train, obj_to_cat=False) gc.collect() one_value_cols = [col for col in train.columns if train[col].nunique() <= 1] one_value_cols_test = [col for col in test.columns if test[col].nunique() <= 1] cols_to_drop =['id_22', 'id_27', 'id_08', 'dist2', 'id_07', 'id_21', 'id_24', 'id_25', 'id_18', 'id_26', 'D7'] cols_to_drop = list(set(one_value_cols+ one_value_cols_test+cols_to_drop)) train = train.sort_values('TransactionDT').drop(cols_to_drop, axis=1) del test gc.collect() Cat= ['ProductCD']+['card'+str(x) for x in range(1,7)]+['addr1','addr2','P_emaildomain','R_emaildomain']+['M'+str(x) for x in range(1,10)]+['id_'+str(x) for x in range(12,39)]+['DeviceType','DeviceInfo'] Cat=[col for col in Cat if (col not in cols_to_drop)] ToDel = ['isFraud', 'TransactionDT', 'TransactionID'] Num=[item for item in train.columns.values.tolist() if (item not in Cat+ToDel+cols_to_drop)] # + for f in tqdm_notebook(Cat): le = LabelEncoder() mis_pos = train[f].isnull().tolist() le.fit(list(train[f].astype(str).values)) train[f] = le.transform(list(train[f].astype(str).values)) feat_mask = [cats==(f) for cats in [x for x in train.columns]] train.iloc[mis_pos,feat_mask] = np.nan gc.collect() train = reduce_mem_usage_sd(train) X = train.sort_values('TransactionDT').drop(ToDel, axis=1) y = train.sort_values('TransactionDT')['isFraud'] del train gc.collect() X.fillna(-999, inplace=True) # + seed=1 params = { 'objective':'binary', 'boosting_type':'gbdt', 'metric':'auc', 'n_jobs':-1, 'feature_fraction': 0.44301599784064954, 'lambda_l1': 0.7185712774952702, 'lambda_l2': 0.8036657945008269, 'learning_rate': 0.006820638087926107, 'min_data_in_leaf': int(122.18518093103775), 'min_gain_to_split': 0.8732382864345388, 'min_sum_hessian_in_leaf': 0.009332742523926576, 'num_leaves': int(274.4907722765963), 'max_depth': int(30.889651140632285), 'tree_learner':'serial', 'max_bin':255, 'seed': seed, } idxs = np.arange(X.shape[0]) cv_splits = [(idxs[:350000], idxs[400000:])] # - # def stupid_iter(): # for col in [0]: # yield([(idxs[:350000], idxs[400000:])]) # cv_splits = stupid_iter() fold = TimeSeriesSplit(5) # for tr,tra in fold.split(X,y): # print(tra) clf = lgb.LGBMClassifier(**params) rfe = RFECV(estimator=clf, step=15, cv=fold, scoring='roc_auc', verbose=2) rfe.fit(X, y) print('Optimal number of features:', rfe.n_features_) plt.figure(figsize=(14, 8)) plt.xlabel("Number of features selected") plt.ylabel("Cross validation score") plt.plot(range(1, len(rfe.grid_scores_) + 1), rfe.grid_scores_) plt.show() for col in X.columns[rfe.ranking_ == 1]: print(col) Name=[col for col in X.columns[rfe.ranking_==1]] pd.DataFrame(Name,columns=['Name']).to_csv('feature_selection.csv', index=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # 
jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ### Using the model from Beeman (2013) [modified version Metzner et al. (2016)] # A short example demonstrating how to use the model from Beeman (2013) # # # Note: change the path in the cell below to match your file system! # + import sciunit import sys sys.path.append("/home/cm15acr/ASSRUnit/Code") from capabilities import ProduceXY from models import BeemanGenesisModel from testsAndPredictionTests import Test4040,Test3030,Test2020,Test2040,Test4020 # - # ### Parameters # + controlparams = {'Filename': 'control_model','Random Seed': 12345,'E-E Weight':30.0e-9,'I-E Weight':0.1e-9,'E-I Weight':0.6e-9,'I-I Weight':0.15e-9,'Background Noise Weight':80.0e-9,'E-Drive Weight':50.0e-9,'I-Drive Weight':1.5e-9,'Background Noise Frequency':8.0} schizparams = {'Filename': 'schiz_model','Random Seed': 12345,'E-E Weight':30.0e-9,'I-E Weight':0.05e-9,'E-I Weight':0.6e-9,'I-I Weight':0.15e-9,'Background Noise Weight':80.0e-9,'E-Drive Weight':50.0e-9,'I-Drive Weight':1.5e-9,'Background Noise Frequency':8.0} # - # ### Model test_model = BeemanGenesisModel(controlparams,schizparams) # ### Tests # + test_4040 = Test4040(observation={'ratio':0.5}) score_4040 = test_4040.judge(test_model) test_3030 = Test3030(observation={'ratio':1.0}) score_3030 = test_3030.judge(test_model) test_2020 = Test2020(observation={'ratio':1.0}) score_2020 = test_2020.judge(test_model) test_2040 = Test2040(observation={'ratio':1.0}) score_2040 = test_2040.judge(test_model) test_4020 = Test4020(observation={'ratio':1.0}) score_4020 = test_4020.judge(test_model) print '\n\n Results 4040:\n' print score_4040 print '\n Results 3030:\n' print score_3030 print '\n Results 2020:\n' print score_2020 print '\n Results 2040:\n' print score_2040 print '\n Results 4020:\n' print score_4020 # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SQL Basics # # **Setting up the conda env:** # # ``` # conda create -n sql python # conda activate sql # conda install ipython-sql sqlalchemy psycopg2 notebook pandas -c conda-forge # ``` # # # **Sample dataset:** # - [cities.csv](https://github.com/giswqs/postgis/blob/master/data/cities.csv) # - [countries.csv](https://raw.githubusercontent.com/giswqs/postgis/master/data/countries.csv) # # ## Connecting to the database # %load_ext sql import os host = "localhost" database = "sdb" user = os.getenv('SQL_USER') password = os.getenv('SQL_PASSWORD') connection_string = f"postgresql://{user}:{password}@{host}/{database}" # %sql $connection_string # + language="sql" # # SELECT * FROM cities LIMIT 10 # - # ## The SQL SELECT statement # + tags=["hide-output"] language="sql" # # SELECT * FROM cities # + language="sql" # # SELECT * FROM cities LIMIT 10 # + language="sql" # # SELECT name, country FROM cities LIMIT 10 # + language="sql" # # SELECT DISTINCT country FROM cities LIMIT 10 # + language="sql" # # SELECT COUNT(DISTINCT country) FROM cities # + language="sql" # # SELECT MAX(population) FROM cities # + language="sql" # # SELECT SUM(population) FROM cities # + language="sql" # # SELECT AVG(population) FROM cities # + language="sql" # # SELECT * FROM cities ORDER BY country LIMIT 10 # + language="sql" # # SELECT * FROM cities ORDER BY country ASC, population DESC LIMIT 10 # - # ## The WHERE Clause # + tags=["hide-output"] 
language="sql" # # SELECT * FROM cities WHERE country='USA' # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country='USA' OR country='CAN' # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country='USA' AND population>1000000 # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country LIKE 'U%' # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country LIKE '%A' # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country LIKE '_S_' # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE country IN ('USA', 'CAN', 'CHN') # + tags=["hide-output"] language="sql" # # SELECT * FROM cities WHERE population BETWEEN 1000000 AND 10000000 # - # ## SQL Joins # # Reference: https://www.w3schools.com/sql/sql_join.asp # # Here are the different types of the JOINs in SQL: # # - `(INNER) JOIN`: Returns records that have matching values in both tables # - `LEFT (OUTER) JOIN`: Returns all records from the left table, and the matched records from the right table # - `RIGHT (OUTER) JOIN`: Returns all records from the right table, and the matched records from the left table # - `FULL (OUTER) JOIN`: Returns all records when there is a match in either left or right table # # ![](https://i.imgur.com/mITYzuS.png) # + language="sql" # # SELECT COUNT(*) FROM cities # + language="sql" # # SELECT * FROM cities LIMIT 10 # + language="sql" # # SELECT COUNT(*) FROM countries # + language="sql" # # SELECT * FROM countries LIMIT 10 # - # ### SQL Inner Join # + tags=["hide-output"] language="sql" # # SELECT * FROM cities INNER JOIN countries ON cities.country = countries."Alpha3_code" # + tags=["hide-output"] language="sql" # # SELECT name, country, countries."Country" FROM cities INNER JOIN countries ON cities.country = countries."Alpha3_code" # - # ### SQL Left Join # + tags=["hide-output"] language="sql" # # SELECT * FROM cities LEFT JOIN countries ON cities.country = countries."Alpha3_code" # - # ### SQL Right Join # + tags=["hide-output"] language="sql" # # SELECT * FROM cities RIGHT JOIN countries ON cities.country = countries."Alpha3_code" # - # ### SQL Full Join # + tags=["hide-output"] language="sql" # # SELECT * FROM cities FULL JOIN countries ON cities.country = countries."Alpha3_code" # - # ### SQL Union # + tags=["hide-output"] language="sql" # # SELECT country FROM cities # UNION # SELECT "Alpha3_code" FROM countries # - # ## Aggregation # # ### Group By # + tags=["hide-output"] language="sql" # # SELECT COUNT(name), country # FROM cities # GROUP BY country # ORDER BY COUNT(name) DESC # + tags=["hide-output"] language="sql" # # SELECT countries."Country", COUNT(name) # FROM cities # LEFT JOIN countries ON cities.country = countries."Alpha3_code" # GROUP BY countries."Country" # ORDER BY COUNT(name) DESC # - # ### Having # + language="sql" # # SELECT COUNT(name), country # FROM cities # GROUP BY country # HAVING COUNT(name) > 40 # ORDER BY COUNT(name) DESC # + language="sql" # # SELECT countries."Country", COUNT(name) # FROM cities # LEFT JOIN countries ON cities.country = countries."Alpha3_code" # GROUP BY countries."Country" # HAVING COUNT(name) > 40 # ORDER BY COUNT(name) DESC # - # ## Conditional statements # + tags=["hide-output"] language="sql" # # SELECT name, population, # CASE # WHEN population > 10000000 THEN 'Megacity' # WHEN population > 1000000 THEN 'Large city' # ELSE 'Small city' # END AS category # FROM cities # - # ## Saving results # + language="sql" # # SELECT * # INTO 
cities_new # FROM cities # + language="sql" # # DROP TABLE IF EXISTS cities_usa; # # SELECT * # INTO cities_usa # FROM cities # WHERE country = 'USA' # + language="sql" # # INSERT INTO cities_usa # SELECT * # FROM cities # WHERE country = 'CAN' # - # ## SQL Comments # # ### Single line coments # + language="sql" # # SELECT * FROM cities LIMIT 10 -- This is a comment; # - # ### Multi-line comments # + language="sql" # # SELECT COUNT(name), country # FROM cities # /* # * Adding Group by # * Adding Order by # */ # GROUP BY country # ORDER BY COUNT(name) DESC # LIMIT 10 # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # imports import json import multiprocessing import os import re import string import sys sys.path.append("../") import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import gensim import matplotlib.pyplot as plt import nltk import numpy as np import pandas as pd import pyLDAvis.gensim pyLDAvis.enable_notebook() # from gensim.corpora import Dictionary from datahandler import DataHandler # + # fcns stopwords = nltk.corpus.stopwords.words() def filter_ngram(ngram, n:int): tag = nltk.pos_tag(ngram) if tag[0][1] not in ['JJ', 'NN'] and tag[1][1] not in ['NN']: return False if n == 2: if ngram[0] in stopwords or ngram[1] in stopwords: return False if n==3: if ngram[0] in stopwords or ngram[-1] in stopwords or ngram[1] in stopwords: return False if 'n' in ngram or 't' in ngram: return False if 'PRON' in ngram: return False return True def merge_ngram(x, bigrams, trigrams): for gram in trigrams: x = x.replace(gram, '_'.join(gram.split())) for gram in bigrams: x = x.replace(gram, '_'.join(gram.split())) return x def filter_stopwords(x): return [word for word in x.split() if word not in stopwords and len(word)>2] def filter_pos(x): pos = nltk.pos_tag(x) filtered = [word[0] for word in pos if word[1] in ['NN']] return filtered # + seed = 123 data_dir = os.path.join(os.pardir, os.pardir, "web_data", "preproc") print("Loading corpus") corpus = DataHandler(data_dir, seed) # print some various information from the corpus print("Total Word Count: {}".format(corpus.total_words)) print("Number of Docs in the Corpus: {}".format(corpus.total_docs)) docs_fpath = corpus.data.keys() # create dictionary for filename and text fpath_txt = {} for fpath in docs_fpath: with open(fpath, "r") as f: fpath_txt[fpath] = f.read() # make dataframe df = (pd.DataFrame.from_dict(fpath_txt, orient='index') .reset_index().rename(index = str, columns = {'index': 'file_name', 0: 'text'})) corpus = df['text'] print("Finished loading corpus") # + min_bigram_frequency = 50 bigram_measures = nltk.collocations.BigramAssocMeasures() finder = nltk.collocations.BigramCollocationFinder.from_documents([doc.split() for doc in corpus]) finder.apply_freq_filter(min_bigram_frequency) bigram_scores = finder.score_ngrams(bigram_measures.pmi) bigram_pmi = pd.DataFrame(bigram_scores) bigram_pmi.columns = ['bigram', 'pmi'] bigram_pmi.sort_values(by='pmi', axis = 0, ascending = False, inplace = True) min_trigram_frequency = 50 trigram_measures = nltk.collocations.TrigramAssocMeasures() finder = nltk.collocations.TrigramCollocationFinder.from_documents([doc.split() for doc in corpus]) finder.apply_freq_filter(min_trigram_frequency) trigram_scores = finder.score_ngrams(trigram_measures.pmi) trigram_pmi = pd.DataFrame(trigram_scores) 
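# (Added, hedged) `score_ngrams(...pmi)` ranks collocations by pointwise mutual information,
# PMI(a, b) = log2( p(a, b) / (p(a) * p(b)) ): large values mean the words co-occur far more
# often than chance would predict, which is why high-PMI n-grams tend to be genuine
# multi-word terms. A quick sanity check on the raw trigram scores computed just above
# (purely illustrative, not part of the original pipeline):
for example_ngram, example_score in trigram_scores[:10]:
    print(' '.join(example_ngram), round(example_score, 2))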
trigram_pmi.columns = ['trigram', 'pmi'] trigram_pmi.sort_values(by='pmi', axis = 0, ascending = False, inplace = True) print("cell done") # + min_pmi = 5 max_ngrams = 500 filtered_bigram = bigram_pmi[bigram_pmi.apply(lambda bigram:\ filter_ngram(bigram['bigram'], 2)\ and min_pmi > 5, axis = 1)][:max_ngrams] filtered_trigram = trigram_pmi[trigram_pmi.apply(lambda trigram: \ filter_ngram(trigram['trigram'], 3)\ and min_pmi > 5, axis = 1)][:max_ngrams] bigrams = [' '.join(x) for x in filtered_bigram.bigram.values if len(x[0]) > 2 or len(x[1]) > 2] trigrams = [' '.join(x) for x in filtered_trigram.trigram.values if len(x[0]) > 2 or len(x[1]) > 2 and len(x[2]) > 2] print("cell done") # + corpus_w_ngrams = corpus.copy() corpus_w_ngrams = corpus_w_ngrams.map(lambda x: merge_ngram(x, bigrams, trigrams)) print("cell done") # - p = multiprocessing.Pool() corpus_w_ngrams = p.map(filter_stopwords, [doc for doc in corpus_w_ngrams]) p.close() print("cell done") p = multiprocessing.Pool() final_corpus = p.map(filter_pos, [doc for doc in corpus_w_ngrams]) p.close() print("cell done") dictionary = gensim.corpora.Dictionary(final_corpus) dictionary.filter_extremes(no_below=10, no_above=0.20) corpus_bow = [dictionary.doc2bow(doc) for doc in final_corpus] print("cell done") Lda = gensim.models.ldamodel.LdaModel ldamodel = Lda(corpus_bow, num_topics=5, id2word = dictionary, passes=40,\ iterations=200, chunksize = 100, eval_every = None) print("cell done") p = pyLDAvis.gensim.prepare(ldamodel, corpus_bow, dictionary, mds='tsne') pyLDAvis.save_html(p, 'web_lda_mp_debug.html') coherence = [] for ii in range(3,5): print('lda with {} topics'.format(ii)) Lda = gensim.models.ldamodel.LdaModel ldamodel = Lda(corpus_bow, num_topics=ii, id2word = dictionary, passes=40,\ iterations=200, chunksize = 100, eval_every = None) print("fit model, computing coherence") cm = gensim.models.coherencemodel.CoherenceModel(model=ldamodel, texts=final_corpus,\ dictionary=dictionary, coherence='c_v') coherence.append((ii,cm.get_coherence())) print("generating tsne viz") p = pyLDAvis.gensim.prepare(ldamodel, corpus_bow, dictionary, mds='tsne') title = 'web_lda_mp_debug_cm_{}.html'.format(ii) pyLDAvis.save_html(p, title) print("done") n_topics = [x[0] for x in coherence] cm = [x[1] for x in coherence] plt.plot(n_topics,cm) plt.scatter(n_topics,cm) plt.title('Number of Topics vs. 
Coherence') plt.xlabel('Number of Topics') plt.ylabel('Coherence') plt.xticks(x_val) plt.savefig("topic_coherence.png") plt.close() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import json import cv2 import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = [20, 10] with open('bboxes.json','r') as f: data = json.load(f) keys = list(data.keys()) i=0 img = plt.imread(keys[i],-1) plt.imshow(img) img.shape bboxes = data[keys[i]] bboxes = list(map(lambda x: list(map(lambda y: float(y), x.strip().split(','))),bboxes)) for x,y,w,h in bboxes: ymin,xmin,ymax,xmax = int(y),int(x),int(y+h),int(x+w) img = cv2.rectangle(img,(xmin,ymin),(xmax,ymax),(0,255,0,255),2) plt.imshow(img) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Example of how basic sentiment analysis for articles will work: import newspaper import transformers from transformers import pipeline #Example website website = 'https://www.afp.com/en/news-hub' source = newspaper.build(website, memoize_articles=False) # + #populate and return a list of articles def create_text_corpus(site_source): article_list = [] for article_object in site_source.articles: if 'www.afp.com/en/' in article_object.url: article_object.download() article_object.parse() article_list.append(article_object.text) return article_list text_list = create_text_corpus(source) # + #download sentiment analysis model and run on list nlp = pipeline('sentiment-analysis') def sentiment_analysis(lst): return nlp() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="crYdbMmW_3kn" colab_type="text" # # Import and preprocessing # + id="xx3AkgtVxr7h" colab_type="code" outputId="0648bc6f-fbbd-4654-a515-3c1694d591bc" colab={"base_uri": "https://localhost:8080/", "height": 35} import pandas as pd import numpy as np import matplotlib.pyplot as plt from numpy import genfromtxt from ast import literal_eval from sklearn.feature_extraction.text import TfidfVectorizer from keras.models import Sequential from keras.layers import Dense, Dropout, Conv1D, GlobalMaxPooling1D from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import Pipeline from keras import regularizers import matplotlib.pyplot as plt from google.colab import drive drive.mount('/content/gdrive') df = pd.read_csv('/content/gdrive/My Drive/datasets/dataset.csv', header=None, names=["artist", "song", "genre", "tokens"]) def to_list(x): return x[1:-1].split(',') def unison_shuffled_copies(a, b): assert a.shape[0] == b.shape[0] p = np.random.permutation(a.shape[0]) return a[p], b[p] def dummy_fun(x): return x df['tokens'] = df['tokens'].apply(to_list) data = df.values tfidf = TfidfVectorizer( analyzer='word', tokenizer=dummy_fun, preprocessor=dummy_fun, token_pattern=None) X = tfidf.fit_transform(data[:,3]) Y = data[:, 2] encoder = LabelEncoder() encoder.fit(Y) Y = encoder.transform(Y) Y = 
np_utils.to_categorical(Y) # + id="VlZKjNbyo9lD" colab_type="code" colab={} X_train = X[:5200] # X_valid = X[4800:5500] X_test = X[5200:] Y_train = Y[:5200] # Y_valid = Y[4800:5500] Y_test = Y[5200:] # + [markdown] id="EYx2rsAAAClh" colab_type="text" # # Neural Network # + id="IfyC1mEuz4KV" colab_type="code" outputId="b8d85b44-5990-4633-f5e0-cc0bbc42e96a" colab={"base_uri": "https://localhost:8080/", "height": 431} seed = 7 np.random.seed(seed) INPUT_SIZE = X.shape[1] def baseline_model(): model = Sequential() model.add(Dense(100, input_dim=INPUT_SIZE, activation='relu')) model.add(Dropout(.5)) model.add(Dense(100, activation='relu')) model.add(Dropout(.5)) model.add(Dense(7, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = baseline_model() history = model.fit(X_train, Y_train, epochs=10, verbose=1, validation_split=0.2, shuffle=True, batch_size=10) loss, accuracy = model.evaluate(X_train, Y_train, verbose=False) print("Training Accuracy: {:.4f}".format(accuracy)) loss, accuracy = model.evaluate(X_test, Y_test, verbose=False) print("Testing Accuracy: {:.4f}".format(accuracy)) # estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=20, verbose=0) # kfold = KFold(n_splits=10, shuffle=True, random_state=seed) # results = cross_val_score(estimator, X, dummy_y, cv=kfold) # print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) # + id="oK0tVf94jkwj" colab_type="code" outputId="60a8cf02-039f-4d7c-fe5a-ae447a418a68" colab={"base_uri": "https://localhost:8080/", "height": 354} plt.style.use('ggplot') def plot_history(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] x = range(1, len(acc) + 1) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(x, acc, 'b', label='Training acc') plt.plot(x, val_acc, 'r', label='Validation acc') plt.ylabel('accuracy') plt.xlabel('epoch') plt.title('ANN Training and Validation accuracy') plt.legend() plt.subplot(1, 2, 2) plt.plot(x, loss, 'b', label='Training loss') plt.plot(x, val_loss, 'r', label='Validation loss') plt.ylabel('Loss') plt.xlabel('epoch') plt.title('Training and validation loss') plt.legend() plot_history(history) # + [markdown] id="8NTcz4Ck_lPM" colab_type="text" # # Convolutional Neural Network # + id="uXHzAqTy9QqE" colab_type="code" outputId="4c8f3423-df8f-4aef-f73d-b26dc22ad11b" colab={"base_uri": "https://localhost:8080/", "height": 431} def cnn_model(): model = Sequential() model.add(Conv1D(512, 5,input_shape=(1, INPUT_SIZE))) model.add(Dense(36, activation='relu')) model.add(Dropout(.5)) model.add(Dense(36, activation='relu')) model.add(Dropout(.5)) model.add(Dense(7, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = baseline_model() history = model.fit(X_train, Y_train, epochs=10, verbose=1, validation_split=0.2, shuffle=True, batch_size=10) loss, accuracy = model.evaluate(X_train, Y_train, verbose=False) print("Training Accuracy: {:.4f}".format(accuracy)) loss, accuracy = model.evaluate(X_test, Y_test, verbose=False) print("Testing Accuracy: {:.4f}".format(accuracy)) # + id="vFrEBNDfp-rN" colab_type="code" outputId="3e4b59db-6144-484f-facf-52ce667736b9" colab={"base_uri": "https://localhost:8080/", "height": 354} plt.style.use('ggplot') def plot_history(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = 
history.history['loss'] val_loss = history.history['val_loss'] x = range(1, len(acc) + 1) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(x, acc, 'b', label='Training acc') plt.plot(x, val_acc, 'r', label='Validation acc') plt.ylabel('accuracy') plt.xlabel('epoch') plt.title('CNN Training and validation accuracy') plt.legend() plt.subplot(1, 2, 2) plt.plot(x, loss, 'b', label='Training loss') plt.plot(x, val_loss, 'r', label='Validation loss') plt.ylabel('Loss') plt.xlabel('epoch') plt.title('Training and validation loss') plt.legend() plot_history(history) # + id="hoonbHhffc2d" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 3D Instance Segmentation with Discriminative Instance Loss # --- # Implemntation of paper: # # [Semantic Instance Segmentation with a Discriminative Loss Function](https://arxiv.org/abs/1708.02551) # + import os import errno import datetime import numpy as np import deepcell # - # ## Load the data # # ### Download the data from `deepcell.datasets` # # `deepcell.datasets` provides access to a set of annotated live-cell imaging datasets which can be used for training cell segmentation and tracking models. # All dataset objects share the `load_data()` method, which allows the user to specify the name of the file (`path`), the fraction of data reserved for testing (`test_size`) and a `seed` which is used to generate the random train-test split. # Metadata associated with the dataset can be accessed through the `metadata` attribute. # + # Download the data (saves to ~/.keras/datasets) filename = 'mousebrain.npz' test_size = 0.2 # % of data saved as test seed = 0 # seed for random train-test split (X_train, y_train), (X_test, y_test) = deepcell.datasets.mousebrain.load_data( filename, test_size=test_size, seed=seed) print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape)) # - # ### Set up filepath constants # + # the path to the data file is currently required for `train_model_()` functions # change DATA_DIR if you are not using `deepcell.datasets` DATA_DIR = os.path.expanduser(os.path.join('~', '.keras', 'datasets')) # DATA_FILE should be a npz file, preferably from `make_training_data` DATA_FILE = os.path.join(DATA_DIR, filename) # confirm the data file is available assert os.path.isfile(DATA_FILE) # + # Set up other required filepaths # If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR) ROOT_DIR = '/data' # TODO: Change this! Usually a mounted volume MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX)) LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX)) # create directories if they do not exist for d in (MODEL_DIR, LOG_DIR): try: os.makedirs(d) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise # - # ## Create the Foreground/Background FeatureNet Model # # Here we instantiate two `FeatureNet` models from `deepcell.model_zoo` for foreground/background separation as well as the interior/edge segmentation. 
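# (Added) As background on the loss this notebook is built around (the paper linked at the
# top), here is a minimal NumPy sketch of the discriminative instance loss: a "pull" term
# draws each pixel embedding toward the mean of its own instance, a "push" term forces the
# instance means apart, and a small regularizer keeps the means bounded. Training further
# below uses `deepcell.losses.discriminative_instance_loss`; the sketch is illustrative only,
# and its margin/weight values are assumed defaults rather than the deepcell settings.

# +
import numpy as np

def discriminative_loss_sketch(embeddings, instance_ids, delta_v=0.5, delta_d=1.5,
                               alpha=1.0, beta=1.0, gamma=0.001):
    """embeddings: (N, D) pixel embeddings; instance_ids: (N,) integer labels, 0 = background."""
    ids = np.unique(instance_ids)
    ids = ids[ids != 0]                      # background pixels do not get an instance mean
    if len(ids) == 0:
        return 0.0
    means = np.stack([embeddings[instance_ids == i].mean(axis=0) for i in ids])

    # "Pull" term: each pixel is drawn toward its instance mean, but only beyond delta_v.
    l_var = np.mean([
        np.mean(np.maximum(0.0, np.linalg.norm(embeddings[instance_ids == i] - mu, axis=1) - delta_v) ** 2)
        for i, mu in zip(ids, means)
    ])

    # "Push" term: instance means repel each other until they are at least 2 * delta_d apart.
    l_dist = 0.0
    if len(ids) > 1:
        pairwise = np.linalg.norm(means[:, None, :] - means[None, :, :], axis=-1)
        l_dist = np.mean(np.maximum(0.0, 2.0 * delta_d - pairwise[~np.eye(len(ids), dtype=bool)]) ** 2)

    # Small regularizer that keeps the instance means close to the origin.
    l_reg = np.mean(np.linalg.norm(means, axis=1))
    return alpha * l_var + beta * l_dist + gamma * l_reg
# -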
norm_method = 'whole_image' # data normalization receptive_field = 61 # should be adjusted for the scale of the data n_skips = 1 # number of skip-connections (only for FC training) frames_per_batch = 3 embedding_dim = 3 # + from deepcell import model_zoo fgbg_model = model_zoo.bn_feature_net_skip_3D( receptive_field=receptive_field, n_features=2, # segmentation mask (is_cell, is_not_cell) n_frames=frames_per_batch, n_skips=n_skips, n_conv_filters=32, n_dense_filters=128, input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])), multires=False, last_only=False, norm_method=norm_method) # - # ## Prepare for training # # ### Set up training parameters. # # There are a number of tunable hyper parameters necessary for training deep learning models: # # **model_name**: Incorporated into any files generated during the training process. # # **n_epoch**: The number of complete passes through the training dataset. # # **lr**: The learning rate determines the speed at which the model learns. Specifically it controls the relative size of the updates to model values after each batch. # # **optimizer**: The TensorFlow module [tf.keras.optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers) offers optimizers with a variety of algorithm implementations. DeepCell typically uses the Adam or the SGD optimizers. # # **lr_sched**: A learning rate scheduler allows the learning rate to adapt over the course of model training. Typically a larger learning rate is preferred during the start of the training process, while a small learning rate allows for fine-tuning during the end of training. # # **batch_size**: The batch size determines the number of samples that are processed before the model is updated. The value must be greater than one and less than or equal to the number of samples in the training dataset. # + from tensorflow.keras.optimizers import SGD from deepcell.utils.train_utils import rate_scheduler fgbg_model_name = 'disc_fgbg' disc_3d_model_name = 'disc_3d' n_epoch = 5 # Number of training epochs lr = 0.01 optimizer = SGD(learning_rate=lr, decay=1e-6, momentum=0.9, nesterov=True) lr_sched = rate_scheduler(lr=lr, decay=0.99) batch_size = 1 # FC training uses 1 image per batch # - # ### Create the DataGenerators # # The `MovieDataGenerator` outputs a raw image (`X`) with it's labeled annotation mask (`y`). Additionally, it can apply a transform to `y` to change the task the model learns. Below we generate 2 training and validation data sets for both the foreground/background model and the pixelwise model. # + from deepcell.image_generators import MovieDataGenerator datagen = MovieDataGenerator( rotation_range=180, zoom_range=(.8, 1.2), horizontal_flip=True, vertical_flip=True) datagen_val = MovieDataGenerator() # + # Create the foreground/background data iterators fgbg_train_data = datagen.flow( {'X': X_train, 'y': y_train}, seed=seed, skip=n_skips, transform='fgbg', batch_size=batch_size) fgbg_val_data = datagen_val.flow( {'X': X_test, 'y': y_test}, seed=seed, skip=n_skips, transform='fgbg', batch_size=batch_size) # + # Create the pixelwise data iterators disc_train_data = datagen.flow( {'X': X_train, 'y': y_train}, seed=seed, skip=n_skips, transform='disc', batch_size=batch_size) disc_val_data = datagen_val.flow( {'X': X_test, 'y': y_test}, seed=seed, skip=n_skips, transform='disc', batch_size=batch_size) # - # ### Compile the model with a loss function # # Each model is trained with it's own loss function. 
`weighted_categorical_crossentropy` is often used for classification models, and `disc_loss` is used for the discriminative instance models. # + from deepcell import losses def loss_function(y_true, y_pred): return losses.weighted_categorical_crossentropy( y_true, y_pred, n_classes=2, from_logits=False) fgbg_model.compile( loss=loss_function, optimizer=SGD(learning_rate=lr, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy']) # - # ## Train the foreground/background model # # Call `fit()` on the compiled model, along with a default set of callbacks. # + from deepcell.utils.train_utils import get_callbacks from deepcell.utils.train_utils import count_gpus model_path = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name)) loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(fgbg_model_name)) num_gpus = count_gpus() print('Training on', num_gpus, 'GPUs.') train_callbacks = get_callbacks( model_path, lr_sched=lr_sched, save_weights_only=num_gpus >= 2, monitor='val_loss', verbose=1) loss_history = fgbg_model.fit( fgbg_train_data, steps_per_epoch=fgbg_train_data.y.shape[0] // batch_size, epochs=n_epoch, validation_data=fgbg_val_data, validation_steps=fgbg_val_data.y.shape[0] // batch_size, callbacks=train_callbacks) # - # ## Create the 3D vector embedding FeatureNet Model # # Instatiate a `FeatureNet` for discriminative instance loss, which maps each pixel to a N-dimensional vector. # + from deepcell import model_zoo disc_3d_model = model_zoo.bn_feature_net_skip_3D( fgbg_model=fgbg_model, receptive_field=receptive_field, n_skips=n_skips, n_features=embedding_dim, n_dense_filters=128, n_conv_filters=32, input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])), norm_method=norm_method) # - # ### Compile the model with a loss function # # Just like the foreground/background model, the `disc` model is compiled with the discriminative instance loss (`disc_loss`) function. # + import tensorflow as tf from deepcell import losses disc_3d_model.compile( loss=losses.discriminative_instance_loss, optimizer=SGD(learning_rate=lr, decay=1e-6, momentum=0.9, nesterov=True), metrics=[tf.keras.metrics.Accuracy()]) # - # ## Train the 3D vector embedding `disc` model # # Call `fit()` on the compiled model, along with a default set of callbacks. # + from deepcell.utils.train_utils import get_callbacks from deepcell.utils.train_utils import count_gpus model_path = os.path.join(MODEL_DIR, '{}.h5'.format(disc_3d_model_name)) loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(disc_3d_model_name)) num_gpus = count_gpus() print('Training on', num_gpus, 'GPUs.') train_callbacks = get_callbacks( model_path, lr_sched=lr_sched, save_weights_only=num_gpus >= 2, monitor='val_loss', verbose=1) loss_history = disc_3d_model.fit( disc_train_data, steps_per_epoch=disc_train_data.y.shape[0] // batch_size, epochs=n_epoch, validation_data=disc_val_data, validation_steps=disc_val_data.y.shape[0] // batch_size, callbacks=train_callbacks) # - # ### Run the model # + # With the trained model, make predictions on testing data test_images = disc_3d_model.predict(X_test[:1, :frames_per_batch]) test_images_fgbg = fgbg_model.predict(X_test[:1, :frames_per_batch])[-1] print(test_images.shape) print(test_images_fgbg.shape) # - # ## Predict on test data # # Use the trained model to predict on new data and post-process the results into a label mask. 
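# (Added) A minimal, hedged sketch of the post-processing described above: threshold the
# foreground probability map, cluster the per-pixel embedding vectors, and reshape the
# cluster labels into a 2D instance label mask. The array names and shapes are illustrative
# assumptions (one frame of embeddings with shape (H, W, D) and a foreground probability map
# with shape (H, W)); the cells that follow apply the same steps to the actual model outputs.

# +
import numpy as np
from sklearn.cluster import DBSCAN

def embeddings_to_label_mask(embedding, fg_prob, fg_threshold=0.9, eps=0.15):
    """Cluster foreground pixel embeddings into an integer instance mask (0 = background)."""
    h, w, _ = embedding.shape
    fg = fg_prob > fg_threshold               # boolean foreground mask, shape (H, W)
    labels = np.zeros((h, w), dtype=int)      # 0 is reserved for background
    if fg.any():
        clusters = DBSCAN(eps=eps, min_samples=5).fit(embedding[fg])  # (N, D) foreground pixels
        labels[fg] = clusters.labels_ + 2     # DBSCAN noise (-1) maps to 1; instances start at 2
    return labels
# -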
# #### Threshold the foreground/background # threshold the foreground/background # and remove background from vector embedding fg_thresh = test_images_fgbg[..., 1] > 0.9 fg_thresh = np.expand_dims(fg_thresh, axis=-1) test_images_post_fgbg = test_images * fg_thresh # #### Mean Shift # + from itertools import cycle from sklearn.cluster import MeanShift, estimate_bandwidth def fit_mean_shift(images, index, frame, ndim, n_samples=500): test_plot = images[index, frame].reshape(-1, ndim) bandwidth = estimate_bandwidth(test_plot, n_samples=n_samples) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(test_plot) return ms # - # #### DB SCAN # + from sklearn.cluster import DBSCAN from sklearn import metrics def fit_dbscan(images, index, frame, ndim, eps=.15): test_plot = images[index, frame].reshape(-1, ndim) db = DBSCAN(eps=eps, min_samples=5, algorithm='kd_tree').fit(test_plot) return db # - # ### Plot the results # #### Scatter plots of the embedded vectors # + import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D index = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[0]) frame = 2#np.random.randint(low=0, high=test_images_post_fgbg.shape[1]) fig = plt.figure(figsize=(15, 15)) # 3D Vector Embedding ax = fig.add_subplot(221, projection='3d') test_plot = test_images[index, frame].reshape(-1, embedding_dim) ax.scatter(test_plot[:, 0], test_plot[:, 1], test_plot[:, 2], c='b', marker='o', s=5) ax.set_title('3D Vector Embedding') # 3D Vector Embedding sans Background ax = fig.add_subplot(222, projection='3d') test_plot = test_images_post_fgbg[index, frame].reshape(-1, 3) ax.scatter(test_plot[:, 0], test_plot[:, 1], test_plot[:, 2], c='b', marker='o', s=5) ax.set_title('3D Vector Embedding sans Background') # Scatter plot after MeanShift ms = fit_mean_shift(test_images_post_fgbg, index=index, frame=frame, ndim=embedding_dim, n_samples=2000) n_clusters_ms = np.unique(ms.labels_).size ax = fig.add_subplot(223, projection='3d') for k, col in zip(range(n_clusters_ms), cycle('bgrcmyk')): my_members = ms.labels_ == k ax.scatter(test_plot[my_members, 0], test_plot[my_members, 1], test_plot[my_members, 2], c=col, s=5) ax.set_title('MeanShift: %d estimated clusters' % n_clusters_ms) # Scatter plot after DBSCAN db = fit_dbscan(test_images_post_fgbg, index=index, frame=frame, ndim=embedding_dim, eps=0.2) # Number of clusters in labels, ignoring noise if present. n_clusters_db = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0) ax = fig.add_subplot(224, projection='3d') core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True # Scatter plot after DBScan unique_labels = set(db.labels_) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] for k, col in zip(unique_labels, colors): if k == -1: col = [0, 0, 0, 1] # Black used for noise. 
class_member_mask = (db.labels_ == k) xy = test_plot[class_member_mask & core_samples_mask] ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], 'o', c=col, s=5) xy = test_plot[class_member_mask & ~core_samples_mask] ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], 'o', c=col, s=5) ax.set_title('DBSCAN: %d estimated clusters' % n_clusters_db) plt.show() # - # #### Plot segmented images # + import matplotlib.pyplot as plt index = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[0]) frame = 0#np.random.randint(low=0, high=test_images_post_fgbg.shape[1]) fig, axes = plt.subplots(ncols=2, nrows=4, figsize=(15, 30), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(X_test[index, frame, ..., 0]) ax[0].set_title('Source Image') ax[1].imshow(test_images_fgbg[index, frame, ..., 1]) ax[1].set_title('Segmentation Prediction') ax[2].imshow(test_images[index, frame, ..., 0], cmap='jet') ax[2].set_title('Vector Embedding: Component 0') ax[3].imshow(test_images[index, frame, ..., 1], cmap='jet') ax[3].set_title('Vector Embedding: Component 1') ax[4].imshow(test_images_post_fgbg[index, frame, ..., 0], cmap='jet') ax[4].set_title('Vector Embedding: Component 0 sans Background') ax[5].imshow(test_images_post_fgbg[index, frame, ..., 0], cmap='jet') ax[5].set_title('Vector Embedding: Component 1 sans Background') ms = fit_mean_shift(test_images_post_fgbg, index=index, frame=frame, ndim=3, n_samples=2000) ax[6].imshow(ms.labels_.reshape(test_images_post_fgbg.shape[2:-1]), cmap='jet') ax[6].set_title('MeanShift: %d Clusters' % np.unique(ms.labels_).size) db = fit_dbscan(test_images_post_fgbg, index=index, frame=frame, ndim=3, eps=0.2) ax[7].imshow(db.labels_.reshape(test_images_post_fgbg.shape[2:-1]), cmap='jet') ax[7].set_title('DBSCAN: %d Clusters' % (len(set(db.labels_)) - int(-1 in db.labels_))) fig.tight_layout() plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import os folderName = './picture/' fileName = os.listdir(folderName) fileName # folderName 에 있는 디렉토리(폴더)의 파일리스트를 자동으로 작성하는 파이썬 기본 #fileName = ['pic1.jpg', 'pic2.jpg', 'pic3.jpg'] txt = fileName[0] tmp = txt.split('.') tmp[1] # + from PIL import Image # PIL 이라는 폴더에서 image.py 를 연결 #openFileName = 0 #mage.opne(openFileName) i = 0 #print(folderName) #print(folderName[0]) print(fileName[0]) Image.open(folderName + fileName[i]) # - cnt = len(fileName) for i in range(cnt): # for 매개변수 in range(0 : 3 - 1) tmp = fileName[i].split('.')[1] if tmp == 'jpg': print(folderName + fileName[i]) #txt = 'pic1.jpg' for txt in fileName: print(txt[5:9]) def rightSplit(txt): start = len(txt) - 3 end = len(txt) print(start, '~', end) print(txt) print(txt[start : end]) aa = fileName[0] rightSplit(aa) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python3_7_6 # language: python # name: py3_7_6 # --- a = [10,20,30] print(a) b = [] print(b) c = list(range(1,11)) print(c) a = [5, 10, 3, 40] print(len(a)) a.append(10) print(a) a.insert(2,100) print(a) a.remove(10) print(a) a.sort() print(a) a.reverse() print(a) a = [242, 256, 237, 223, 263, 81, 46] print(a) n = len(a) print(n) # + my_sum = 0 for i in a: my_sum = my_sum + i print(my_sum) # + my_avg = my_sum / n print(my_avg) # - import matplotlib.pyplot as plt # + #x 축 데이터 저장하기 x_data = [ 'Mon', 'Tue', 'Wed', 'Thr', 
'Fri', 'Sat', 'Sun'] #유동인구 제목 :Floating Population Data(1Week) plt.title("Floating Population Data(1week)",fontsize = 16) #x축(요일), y축(유동인구수) plt.xlabel("Day of the week",fontsize=12) plt.ylabel("Floating Population", fontsize=12) #산점도 그리기, (x축 : 요일, y축: 유동인구수) #각각에 좌표평면에 점으로 데이터를 표현 plt.scatter(x_data, a) #라인그래프 그리기 (x축 : 요일, y축: 유동인구수) plt.plot(x_data, a) #그래프그리기 #plt.plot(x_data, a, 'bo-') #그래프 보여주기 plt.show() # + weekday_size=5 weekday_sum = 0 weekday_avg = 0 #주중 합 구하기 for i in range(0,weekday_size): weekday_sum += a[i] #주중 평균 구하기 weekday_avg = weekday_sum/weekday_size #주중 데이터, 주중 합, 주중평균 구하기 print("weekday Data: ", a[0:5]) print("weekday Sum:", weekday_sum) print("weekday Average :", weekday_avg) # - import matplotlib.pyplot as plt # + plt.title("Floating Population Data(1week)", fontsize=16) #x축(요일), y축(유동인구수) plt.xlabel("Day of the week", fontsize=12) plt.ylabel("Floating Population", fontsize=12) #일주일 라인 그래프 그리기 plt.plot(x_data, a) #주중 선정도 그래플 그리기(red, 50) plt.scatter(x_data[0:5],a[0:5], c="red", s=50) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sqlalchemy from sqlalchemy.orm import Session from sqlalchemy import create_engine, inspect, func from sqlalchemy import Column, Float, Integer, String, Date from sqlalchemy.ext.declarative import declarative_base Base2 = declarative_base() import pandas as pd import numpy as np import getpass import sqlalchemy as db from matplotlib import style import matplotlib.pyplot as plt # - # connect to local database from sqlalchemy import create_engine pa = getpass.getpass("postgres user password: ") engine = create_engine(f'postgresql://postgres:{pa}@localhost:5432/SQL_Challenge_db') connection = engine.connect() metadata = db.MetaData() departments = pd.read_sql('select * from departments', connection) departments ### Import the Employees table" employees = pd.read_sql('select * from employees', connection) employees.head() #Convert salaries table to dataFrame salary = pd.read_sql_query('select * from salaries',connection) salary.head() #Convert title table to dataFrame title = pd.read_sql_query('select * from titles',connection) title.head() #by looking at the emp_no values it would appear that they do. 
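# (Added, hedged) A programmatic version of the check mentioned in the comment above:
# confirm that the emp_no values in the salaries table actually appear in the employees
# table before treating emp_no as a join key (uses the `salary` and `employees`
# DataFrames loaded in the cells above).
matched = salary['emp_no'].isin(employees['emp_no'])
print('{} of {} salary rows have a matching emp_no'.format(matched.sum(), len(salary)))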
salary.describe() # Create the inspector and connect it to the engine inspector = inspect(engine) # Collect the names of tables within the database inspector.get_table_names() # Using the inspector to print the column names within the 'employees' table and its types columns = inspector.get_columns('employees') for column in columns: print(column["name"], column["type"]) #create employees class class employees(Base2): __tablename__ = 'employees' emp_no = Column(Integer, primary_key=True) birth_date = Column(Date) first_name = Column(String) last_name = Column(String) sex = Column(String) hire_date = Column(Date) session = Session(engine) # + # print sums by gender male = session.query(employees).filter_by(sex = 'M').count() female = session.query(employees).filter_by(sex = 'F').count() print(male) print(female) # - # Using the inspector to print the column names within the 'Salaries' table and its types columns = inspector.get_columns('salaries') for column in columns: print(column["name"], column["type"]) #create salaries class class salaries(Base2): __tablename__ = 'salaries' emp_no = Column(Integer, primary_key=True) salary = Column(Integer) # + #query the salaries table x = session.query(salaries.salary) # Plot the Results in a Matplotlib bar chart df = pd.DataFrame(x, columns=['salary']) a = np.array(df) x = a[0:,0] # + fig, ax = plt.subplots(figsize=(12, 8)) # the histogram of the data n, bins, patches = plt.hist(x, 12, facecolor='red', alpha=0.75) plt.xlabel('Salary') plt.ylabel('Frequency') plt.title('Histogram of Employee Salaries') plt.grid(True) plt.show() fig.savefig('Employee_Salary_Histogram.png') # - # Using the inspector to print the column names within the 'titles' table and its types columns = inspector.get_columns('titles') for column in columns: print(column["name"], column["type"]) #create titles class class titles(Base2): __tablename__ = 'titles' title_id = Column(Integer, primary_key=True) title = Column(String) session.query(titles.title).distinct().all() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/davemlz/eemont/blob/master/docs/tutorials/016-Spectral-Indices-From-Awesome-Spectral-Indices-List.ipynb) # + [markdown] id="jZEthLln92Ep" # # Spectral Indices From the [Awesome Spectral Indices for GEE](https://github.com/davemlz/awesome-ee-spectral-indices) # + [markdown] id="dNa470OZ8Oec" # _Tutorial created by ****_: [GitHub](https://github.com/davemlz) | [Twitter](https://twitter.com/dmlmont) # # - GitHub Repo: [https://github.com/davemlz/eemont](https://github.com/davemlz/eemont) # - PyPI link: [https://pypi.org/project/eemont/](https://pypi.org/project/eemont/) # - Conda-forge: [https://anaconda.org/conda-forge/eemont](https://anaconda.org/conda-forge/eemont) # - Documentation: [https://eemont.readthedocs.io/](https://eemont.readthedocs.io/) # - More tutorials: [https://github.com/davemlz/eemont/tree/master/docs/tutorials](https://github.com/davemlz/eemont/tree/master/docs/tutorials) # + [markdown] id="CD7h0hbi92Er" # ## Let's start! # + [markdown] id="E0rc6Cya92Es" # If required, please uncomment: # + id="NYzyvKtk92Es" # #!pip install eemont # #!pip install geemap # + [markdown] id="x3Rm3qt_92Et" # Import the required packges. 
# + id="H0C9S_Hh92Et" import ee, eemont, geemap import geemap.colormaps as cm # + [markdown] id="k1sdX2p592Eu" # Authenticate and Initialize Earth Engine and geemap. # + id="7QDXqVwy8Oef" Map = geemap.Map() # - # Let's define a point of interest: poi = ee.Geometry.PointFromQuery("El Cairo, Egypt",user_agent = "eemont-tutorial-016") # Let's preprocess the S2 dataset: S2 = (ee.ImageCollection("COPERNICUS/S2_SR") .filterBounds(poi) .filterDate("2020-01-01","2021-01-01") .maskClouds() .scaleAndOffset()) # ## New method for computing Spectral Indices # In order to compute Spectral Indices from the Awesome List of Spectral Indices, please use the `spectralIndices` method for ee.Image and ee.ImageCollection classes (This method will eventually replace the `index` method): S2offline = S2.spectralIndices(["NDVI","kNDVI","NDWI","SeLI"]).median() # The `spectralIndices` method will look into the Awesome List of Spectral Indices that comes with the eemont package (offline). If the list is outdated (the local list is updated with every new version or with the dev installation), you can use the `online` argument to use the most recent list directly from the [Awesome Spectral Indices for GEE repositoy](https://github.com/davemlz/awesome-ee-spectral-indices): S2online = S2.spectralIndices(["NDVI","kNDVI","NDWI","SeLI"],online = True).median() # The `spectralIndices` has new arguments for some of the new indices that were added to the list: # # - cexp (float, default = 1.16) – Exponent used for OCVI. # - nexp (float, default = 2.0) – Exponent used for GDVI. # - alpha (float, default = 0.1) – Weighting coefficient used for WDRVI. # - slope (float, default = 1.0) – Soil line slope. # - intercept (float, default = 0.0) – Soil line intercept. # Check the [eemont API Reference](https://eemont.readthedocs.io/en/latest/classes/index.html) and the [Awesome Spectral indices Documentation](https://awesome-ee-spectral-indices.readthedocs.io/en/latest/) for more info. # Now, let's visualize our indices! # + vegetation_vis = { "min": 0, "max": 1, "palette": cm.palettes.ndvi } water_vis = { "min": 0, "max": 1, "palette": cm.palettes.ndwi } # - # Visualize everything with `geemap`: Map.addLayer(S2online.select("NDVI"),vegetation_vis,"NDVI") Map.addLayer(S2online.select("kNDVI"),vegetation_vis,"kNDVI") Map.addLayer(S2online.select("SeLI"),vegetation_vis,"SeLI") Map.addLayer(S2online.select("NDWI"),water_vis,"NDWI") Map.centerObject(poi,8) Map # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # An example of k-nearest neighbors classification # # This notebook is a brief explanation of how we can build a non parametric Machine Learning model in Python using [scikit-learn](http://scikit-learn.org). If you are unfamiliar with kNN classification, you can start by reading on [Wikipedia](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm), or watch this very interesting [video](https://www.youtube.com/watch?v=4ObVzTuFivY) to get up to speed. # # This example is built on the dataset provided for the [Kaggle competition](https://www.kaggle.com/c/titanic) "Titanic: Machine Learning for disaster". We are basically given information on the people who were on board, and we must build a model to predict what sort of people would have a better chance of survival ([spoiler](https://en.wikipedia.org/wiki/Women_and_children_first)). 
# # This is a classic example of a binary classification problem, since there are only two possible outcomes for each person. kNN is not the most sexy classification algorithm out there, but its simplicity can be a strong advantage in certain cases. In this example, we will use this method to predict whether a person will survive or not based on their age, sex and the class of their ticket. So... let's do it. # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns sns.set(style="white") # Importing the dataset dataset = pd.read_csv('../Data/train_titanic.csv') dataset = dataset[dataset['Age'].notnull()] cat_cols = ['Sex' ] dataset = pd.get_dummies(dataset, columns = cat_cols) X = dataset[['Pclass','Age','Sex_male']].values y = dataset['Survived'].values # ### Training set and Test set # Kaggle gives us a test set, but for this example we'd rather split it ourselves so we can visualize how the model is performing. We will keep 80% of the data in the training set. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # ### Feature scaling # We apply feature scaling to normalize the range of each variable. This ensures that each feature contributes approximately the same to the distances computed in the objective function. Note that both the training and the test set must be scaled. from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # ### Fitting Logistic Regression to the Training set # Once the data is clean and ready, we can build the classifier and fit it to the training data. from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors =10) classifier.fit(X_train, y_train) # ### Predicting on the test set # Once the model has learned from the training data, we can make predictions on the test data using predict. # Predicting the Test set results y_pred = classifier.predict(X_test) # ### Assessing the model's performance # There are several ways to assess how good our predictions are. One of them is the [confusion matrix](http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/). This will quickly let us see how many good and bad predictions the model is making. Alternatively, we can use the accuracy score, which gives us the ratio of correctly classified samples. Note that we could change the accuracy score to account for the difference in the sample size for each possible outcome. # Making the Confusion Matrix from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) #Compute the accuracy score acc_score = accuracy_score(y_test, y_pred) acc_score # ### Awesome! # So we have built a model with a fairly good accuracy (around 80%). I am sure we can do better, but we can be proud of these numbers with a linear model that uses only three features. Of course, we can do a lot better, but we are still working our way up to more complex (and maybe more accurate) algorithms. # ## Visualizing our results # We have used the accuracy score to assess our model's performance. This is very useful, but plotting the results is a more exciting way of looking at our work. So let's do that. We will first plot the predicted results, together with the decision boundary. 
# # kNN is a non linear classifier, and plotting the decision boundary is slightly trickier than simply plotting a plane. I have opted to show you both regions in the second plot by scattering points uniformly across the features space. # + from mpl_toolkits.mplot3d import axes3d fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111, projection='3d') died = np.where(y_test==0)[0] survived = np.where(y_test==1)[0] p=ax.scatter(xs = X_test[died,0], ys =X_test[died,1], zs=X_test[died,2], zdir='z', s=20, c='red',label = 'Died') p=ax.scatter(xs = X_test[survived,0], ys =X_test[survived,1], zs=X_test[survived,2], zdir='z', s=20, c='blue',label = 'Survived') ax.set_xlabel('Class') ax.set_ylabel('Age') ax.set_zlabel('Sex') ax.legend() #ax.set_zticks([-1.33117269, 0.75121734] ax.zaxis.set_ticks(np.unique(X_test[:,2])) ax.zaxis.set_ticklabels(['Female','Male']) ax.xaxis.set_ticks(np.unique(X_test[:, 0])) ax.xaxis.set_ticklabels(['1','2','3']) plt.show() # - # ## Visualizing our results (II) # Since we know what our model is predicting on each side of the decision boundary, we can color the points using the actual data. This will let us visualize where our model is failing. # + # Decision Boundaries from mpl_toolkits.mplot3d import axes3d fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111, projection='3d') x_surf = np.linspace(np.min(X_test[:,0]), np.max(X_test[:,0]), 30) y_surf = np.linspace(np.min(X_test[:,1]), np.max(X_test[:,1]),30) z_surf = np.linspace(np.min(X_test[:,2]), np.max(X_test[:,2]),30) x_surf, y_surf, z_surf = np.meshgrid(x_surf,y_surf, z_surf) predicted_volume = classifier.predict(np.array([x_surf.ravel(), y_surf.ravel(), z_surf.ravel()]).T).reshape(x_surf.shape) died_volume = np.where(np.asarray(predicted_volume.ravel())==0) survived_volume = np.where(np.asarray(predicted_volume.ravel())==1) ax.scatter(x_surf.ravel()[died_volume[0]], y_surf.ravel()[died_volume[0]], z_surf.ravel()[died_volume[0]], zdir='z', s=0.5, c='red',label = 'Died',depthshade=True) ax.scatter(x_surf.ravel()[survived_volume[0]], y_surf.ravel()[survived_volume[0]], z_surf.ravel()[survived_volume[0]], zdir='z', s=0.5, c='blue',label = 'Survived',depthshade=True) ax.set_xlabel('Class') ax.set_ylabel('Age') ax.set_zlabel('Sex') ax.legend() #ax.set_zticks([-1.33117269, 0.75121734] ax.zaxis.set_ticks(np.unique(X_test[:,2])) ax.zaxis.set_ticklabels(['Female','Male']) ax.xaxis.set_ticks(np.unique(X_test[:,0])) ax.xaxis.set_ticklabels(['1','2','3']) p=ax.scatter(xs = X_test[died,0], ys =X_test[died,1], zs=X_test[died,2], zdir='z', s=20, c='k',label = 'Died') p=ax.scatter(xs = X_test[survived,0], ys =X_test[survived,1], zs=X_test[survived,2], zdir='z', marker = '^', s=20, c='k',label = 'Survived') plt.show() # - # We have demonstrated how to put this simple algorithm to work. As we have mentioned, it is very simple and it can be useful when we cannot make any assumption on the parameters. There are a couple of things to read on before using this method for a particular problem: # - You may be affected by the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality), which can make this method inappropiate for problems with a large number of features. # - We have mentioned that the algorithm is instance-based (or lazy). This means that there is not a training step as such, and the model will "keep" the training data in memory and make use of it to make predictions. 
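# (Added) The classifier above uses a fixed `n_neighbors=10`. Since k is essentially the only
# hyperparameter of kNN, a natural follow-up is to choose it by cross-validation on the
# training set. The sketch below is an addition for illustration (it reuses the scaled
# `X_train`/`y_train` from earlier cells) and is not part of the original analysis.

# +
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = {'n_neighbors': list(range(1, 31))}
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
search.fit(X_train, y_train)
print('Best k: {}'.format(search.best_params_['n_neighbors']))
print('Cross-validated accuracy: {:.3f}'.format(search.best_score_))
# -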
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import requests as rq import json import urllib.parse as urlparse from urllib.parse import parse_qs from urllib.parse import unquote from datetime import datetime # Reference page # https://docs.microsoft.com/en-us/rest/api/cost-management/retail-prices/azure-retail-prices # + # Helper method for extracting the number of the next skip value def extract_skip(url): decoded = unquote(url) parsed = urlparse.urlparse(decoded) result = parse_qs(parsed.query)['$skip'][0] return int(result) def read_page(items, url): reponse = rq.get(url) page = json.loads(reponse.text) items += page['Items'] next_page = page['NextPageLink'] return items, next_page # - def read_and_extract(items, next_page): while next_page: items, next_page = read_page(items, next_page) if not next_page: break # only print for every 1000 pages skip = extract_skip(next_page) if skip % 1000 == 0 : now = datetime.now() dt_string = now.strftime('%H:%M:%S') print (dt_string, ': ', next_page) print('===== Done ======') return items # ### Compute compute_url = "https://prices.azure.com/api/retail/prices?currencyCode='USD'&$filter=serviceFamily%20eq%20'Compute'" items = [] items, next_page = read_page(items, compute_url) items = read_and_extract(items, next_page) # pickle azure_data = pd.DataFrame(items) azure_data.to_pickle('./data/azure_src.compute.xz.pkl', compression='xz') azure_src = './data/azure_src.compute.xz.pkl' azure_data = pd.read_pickle(azure_src, compression="xz") azure_data # ### Storage storage_url = "https://prices.azure.com/api/retail/prices?$filter=serviceName%20eq%20%27Storage%27" items = [] items, next_page = read_page(items, storage_url) items = read_and_extract(items, next_page) # pickle storage_data = pd.DataFrame(items) storage_data.to_pickle('./data/azure.storage.xz.pkl', compression='xz') storage_data.head(10) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import requests response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&offest=50&market=US') big_data = response.json() # - len(big_data['artists']) FIRST PAGE 'https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&offest=0&market=US' SECOND PAGE 'https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&offest=50&market=US' THIRD PAGE 'https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&offest=150&market=US' FOURTH PAGE 'https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&offest=200&market=US' # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np # ^^^ pyforest auto-imports - don't write above this line from copy import deepcopy from tqdm.auto import tqdm from cfg import TrainCfg, TrainCfg_ns, ModelCfg, ModelCfg_ns from model import ECG_CRNN_CINC2021 from dataset import CINC2021 from torch_ecg.model_configs.cnn import resnet_nature_comm_bottle_neck_se import seaborn as sns from gather_results import gather_from_checkpoint, test_inference_speed from cfg import twelve_leads, six_leads, 
four_leads, three_leads, two_leads import time # %load_ext autoreload # %autoreload 2 # - from scipy.signal import buttord from matplotlib.pyplot import cm sns.set() colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] markers = ["+", "v", "x", "*", "p", "d", "s", "$\heartsuit$"] marker_size = 9 plt.rcParams['xtick.labelsize']=28 plt.rcParams['ytick.labelsize']=28 plt.rcParams['axes.labelsize']=40 plt.rcParams['legend.fontsize']=24 # + # plt.rcParams # - # ## post challenge results df_pcr = pd.read_csv("./official_results/Post_Challenge_PMEA_test_results.csv") df_pcr sources = ["Total Test", "training", "validation", "CPSC2", "G12EC", "UMich", "Undisclosed",] # + fig, ax = plt.subplots(figsize=(16,12)) # for idx, (source, df_s) in enumerate(df_pcr.groupby("Data Source")): for idx, source in enumerate(sources): df_s = df_pcr[df_pcr["Data Source"]==source] df_s = df_s.sort_values("Lead Combination", ascending=False).reset_index(drop=True) if idx > 0: ls = "--" markersize = marker_size linewidth = 2 else: ls = "-" markersize = marker_size + 4 linewidth = 4 ax.plot( df_s["Challenge Metric"], c=colors[idx], marker=markers[idx], markersize=markersize, label=source, ls=ls, linewidth=linewidth, ) ax.set_ylim(0,1) ax.set_xlabel("Lead-Set") ax.set_xticks(np.arange(5)) ax.set_xticklabels([f"{l}-leads" for l in [12,6,4,3,2]], rotation=20) ax.tick_params(axis = 'both') ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="best", ncol=3) plt.savefig("./images/post-challenge.svg", dpi=1200, bbox_inches="tight", transparent=False) plt.savefig("./images/post-challenge.pdf", dpi=1200, bbox_inches="tight", transparent=False) # - # # pre-load datasets # + # ds_val = CINC2021(TrainCfg_ns, training=False, lazy=False) # - ds_train = CINC2021(TrainCfg_ns, training=True, lazy=True) # + # ds_train._load_all_data() # - ECG_CRNN_CINC2021.__DEBUG__ = False from torch_ecg.utils.misc import dict_to_str, get_record_list_recursive3 from gather_results import append_model_config_if_needed # + from gather_results import gather_from_checkpoint from dataset import CINC2021 # %load_ext autoreload # %autoreload 2 # - from torch_ecg.utils.misc import MovingAverage ma = MovingAverage() append_model_config_if_needed() # # gather statistics results_dir = os.path.join(os.path.dirname(TrainCfg.log_dir), "results") results_dir l_csv = [ os.path.join(results_dir,item+".csv") \ for item in get_record_list_recursive3("/home/wenhao/Jupyter/wenhao/workspace/cinc2021/results/", "TorchECG.*\.csv") ] eval_models = True res = {} # with tqdm(l_csv) as t: for fp in t: df_fp = pd.read_csv(fp) zs = "" ls = "" lr = "-adaptive" loss = "-bce" mixup = "" cnn_name = "" rnn_name = "" attn_name = "" bp = "" with open(fp.replace("csv", "txt"), "r") as txt: lines = txt.read().splitlines()[-1000:] model_fp = None for l in lines: tmp = re.findall("/.*BestModel.*\.pth\.tar", l) if len(tmp) > 0: model_fp = tmp[0] if model_fp is None: print(f"{fp} has no corr. 
model") continue model, train_cfg = ECG_CRNN_CINC2021.from_checkpoint(model_fp) print("model loaded from checkpoint") if "normalize" in train_cfg: zs = "-zscore" if "mixup" in train_cfg: mixup = "-mixup" if "label_smooth" in train_cfg and train_cfg["label_smooth"]: ls = "-label_smooth" if train_cfg["loss"] == "AsymmetricLoss": loss = "-asymmetric" if train_cfg["lr_scheduler"] in ["one_cycle", "onecycle"]: lr = "-onecycle" if train_cfg["bandpass"] is None: bp = "-no_bp" cnn_name = model.config.cnn.name if model.config.rnn.name != "none": rnn_name = "-"+model.config.rnn.name if model.config.attn.name != "none": attn_name = "-"+model.config.attn.name n_linear = len([m for m in model.clf if m.__class__.__name__=='Linear']) clf = f"-{n_linear}linear" name = f"{cnn_name}{rnn_name}{attn_name}{clf}{zs}{ls}{mixup}{loss}{lr}{bp}" train_loss = df_fp[df_fp.part=="train"][["epoch", "step", "loss", "time"]].dropna() train_cm = df_fp[df_fp.part=="train"][["epoch", "step", "challenge_metric"]].dropna() val_cm = df_fp[df_fp.part=="val"][["epoch", "step", "challenge_metric"]].dropna() time_used = [] for ep, df_gp in train_loss.groupby("epoch"): time_used.append(np.diff(pd.to_datetime(df_gp["time"]).values)) time_used = np.concatenate(time_used)/np.timedelta64(1, 's') print("stats gathered") time.sleep(5) if eval_models: if len(train_cfg.leads) < 12: ds_use = CINC2021.from_extern(ds_train, train_cfg) else: ds_use = ds_train time.sleep(5) inf_speed = test_inference_speed(model_fp, ds_use) else: inf_speed = np.nan time.sleep(5) print("inference speed computed") res[fp] = { "folder": list(filter(lambda s: len(s)>0, os.path.dirname(fp).split(os.path.sep)))[-1], "name": name, "cnn_name": model.config.cnn.name, "rnn_name": model.config.rnn.name, "attn_name": model.config.attn.name, "n_linear": n_linear, "n_leads": len(train_cfg.leads), "train_loss": train_loss, "train_cm": train_cm, "val_cm": val_cm, "size": model.module_size, "size_h": model.module_size_, "val_cm_max": val_cm.challenge_metric.max(), "speed": round((64*20/time_used).mean()), "inf_speed": inf_speed, } del model if eval_models: if ds_use is not ds_train: del ds_use torch.cuda.empty_cache() print(f"{name} finished") print("#"*80 + "\n"*2) len(res) df_res = pd.DataFrame(res.values()) df_res.name.values df_res["efficiency"] = df_res["val_cm_max"] / df_res["size"] * 1e8 df_res["val_cm_max"] = df_res["val_cm_max"].apply(lambda s: round(s,4)) df_res["size"] = df_res["size"].apply(lambda s: round(s/1000000, 2)) df_res["efficiency"] = df_res["efficiency"].apply(lambda s: round(s,3)) df_res = df_res.sort_values("val_cm_max", ascending=False).reset_index(drop=True) df_res # # 5 sets of ablation studies df_aba_1 = df_res[df_res.name.str.startswith("resnet_nature_comm_se-lstm-se-2linear") & (df_res.n_leads==12)].reset_index(drop=True) # df_aba_1 = df_aba_1.sort_values("val_cm_max") df_aba_1 df_aba_1.name.tolist() # + fig, ax = plt.subplots(figsize=(16,12)) for idx, row in df_aba_1.iterrows(): train_loss = row.train_loss train_cm = row.train_cm val_cm = row.val_cm if "bce" in row["name"]: ax.plot([]) continue label = row["name"].replace("resnet_nature_comm_se-lstm-se-2linear-", "") ax.plot( (train_loss.index.values*20)[::18], (ma(train_loss.loss, weight=0.6))[::18], label=label, c=colors[idx], marker=markers[idx], markersize=marker_size, ) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Loss (n.u.)") ax.tick_params(axis = 'y', rotation = 45) ax.set_ylim(0,0.2) ax.legend(loc="upper right") fig.tight_layout() plt.savefig("./images/abla1_loss.svg", dpi=1200, 
bbox_inches="tight", transparent=False) plt.savefig("./images/abla1_loss.pdf", dpi=1200, bbox_inches="tight", transparent=False) # + fig, ax = plt.subplots(figsize=(16,16)) belt1, belt2 = [], [] for idx, row in df_aba_1.iterrows(): train_cm = row.train_cm label = row["name"].replace("resnet_nature_comm_se-lstm-se-2linear-", "train-") ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label=label, c=colors[idx], marker=markers[idx], markersize=marker_size, ) if "bce" not in row["name"]: belt1.append(train_cm.challenge_metric.values) belt1 = np.array(belt1) for idx, row in df_aba_1.iterrows(): val_cm = row.val_cm label = row["name"].replace("resnet_nature_comm_se-lstm-se-2linear-", "val-") ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label=label, c=colors[idx], marker=markers[idx], markersize=marker_size, ) if "bce" not in row["name"]: belt2.append(val_cm.challenge_metric.values) belt2 = np.array(belt2) ax.fill_between(train_cm.index.values*20, np.mean(belt1,axis=0)-0.06, np.mean(belt1,axis=0)+0.06, color='r', alpha=.15) ax.fill_between(train_cm.index.values*20, np.mean(belt2,axis=0)-0.028, np.mean(belt2,axis=0)+0.028, color='g', alpha=.2) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.set_ylim(0.23,0.97) ax.legend(loc="lower right", fontsize=20) fig.tight_layout() plt.savefig("./images/abla1_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla1_cm.svg", dpi=1200, bbox_inches="tight") # - abla_2 = [ 'resnet_nature_comm_se-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'resnet_nature_comm_se-se-2linear-zscore-mixup-asymmetric-onecycle', 'resnet_nature_comm_se-2linear-zscore-mixup-asymmetric-onecycle', 'resnet_nature_comm_se-1linear-zscore-mixup-asymmetric-onecycle' ] df_abla2 = df_res[(df_res.name.isin(abla_2)) & (df_res.n_leads==12)].reset_index(drop=True) df_abla2 # + fig, ax = plt.subplots(figsize=(16,12)) belt1, belt2 = np.full((len(abla_2),50), np.nan), np.full((len(abla_2),50), np.nan) for idx, row in df_abla2.iterrows(): train_cm = row.train_cm ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label=row["name"].replace("resnet_nature_comm_se-", "train-").replace("-zscore-mixup-asymmetric-onecycle", ""), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt1[idx,:len(train_cm.challenge_metric)] = train_cm.challenge_metric.values for idx, row in df_abla2.iterrows(): val_cm = row.val_cm ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label=row["name"].replace("resnet_nature_comm_se-", "val-").replace("-zscore-mixup-asymmetric-onecycle", ""), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt2[idx,:len(val_cm.challenge_metric)] = val_cm.challenge_metric.values ax.fill_between(train_cm.index.values*20, np.nanmean(belt1,axis=0)-0.06, np.nanmean(belt1,axis=0)+0.06, color='r', alpha=.15) ax.fill_between(train_cm.index.values*20, np.nanmean(belt2,axis=0)-0.032, np.nanmean(belt2,axis=0)+0.026, color='g', alpha=.2) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="lower right") ax.set_ylim(0.25,1) fig.tight_layout() plt.savefig("./images/abla2_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla2_cm.svg", dpi=1200, bbox_inches="tight") # - abla_3 = ['resnet_nature_comm_se-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'resnet_nature_comm_bottle_neck_se-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'tresnetN-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 
'tresnetP-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'tresnetF-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'multi_scopic-lstm-se-2linear-zscore-mixup-asymmetric-onecycle', 'multi_scopic_leadwise-lstm-se-2linear-zscore-mixup-asymmetric-onecycle',] # + df_abla3 = df_res[(df_res.name.isin(abla_3)) & (df_res.n_leads==12) & (~df_res.folder.str.contains("1127|nas"))].reset_index(drop=True) df_abla3["ordering"] = df_abla3.name.apply(lambda s: abla_3.index(s)) df_abla3 = df_abla3.sort_values(by="ordering").reset_index(drop=True) # - df_abla3 name_map = { "resnet_nature_comm_se": "ResNet_NC_SE", "resnet_nature_comm_bottle_neck_gc": "ResNet_NC_BG", "multi_scopic": "branched", "multi_scopic_leadwise": "branched_leadwise", "resnet_nature_comm_bottle_neck_se": "ResNet_NC_BS", "tresnetN": "TResNet-N", "tresnetP": "TResNet-P", "tresnetF": "TResNet-F", } # + fig, ax = plt.subplots(figsize=(16,12)) belt1, belt2 = np.full((len(abla_3),50), np.nan), np.full((len(abla_3),50), np.nan) for idx, row in df_abla3.iterrows(): train_cm = row.train_cm if len(train_cm) == 50: _train_cm = train_cm name = row["name"].replace("-lstm-se-2linear-zscore-mixup-asymmetric-onecycle", "") ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label="train-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt1[idx,:len(train_cm.challenge_metric)] = train_cm.challenge_metric.values for idx, row in df_abla3.iterrows(): val_cm = row.val_cm name = row["name"].replace("-lstm-se-2linear-zscore-mixup-asymmetric-onecycle", "") ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label="val-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt2[idx,:len(val_cm.challenge_metric)] = val_cm.challenge_metric.values ax.fill_between(_train_cm.index.values*20, np.nanmean(belt1,axis=0)-0.06, np.nanmean(belt1,axis=0)+0.06, color='r', alpha=.15) ax.fill_between(_train_cm.index.values*20, np.nanmean(belt2,axis=0)-0.03, np.nanmean(belt2,axis=0)+0.028, color='g', alpha=.2) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="lower right", ncol=2) ax.set_ylim(0.23,1) fig.tight_layout() plt.savefig("./images/abla3_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla3_cm.svg", dpi=1200, bbox_inches="tight") # - df_abla3 df_abla3.at[1,"val_cm"].challenge_metric.max() df_abla3.at[6,"val_cm"].challenge_metric.max() for idx, row in df_abla3.iterrows(): print(row["name"], df_abla3.at[idx,"val_cm"].challenge_metric.max()) abla_4 = [ 'resnet_nature_comm_se-1linear-zscore-mixup-asymmetric-onecycle', 'resnet_nature_comm_bottle_neck_se-1linear-zscore-mixup-asymmetric-onecycle', 'tresnetN-1linear-zscore-mixup-asymmetric-onecycle', 'tresnetP-1linear-zscore-mixup-asymmetric-onecycle', 'tresnetF-1linear-zscore-mixup-asymmetric-onecycle', 'multi_scopic-1linear-zscore-mixup-asymmetric-onecycle', 'multi_scopic_leadwise-1linear-zscore-mixup-asymmetric-onecycle', ] df_abla4 = df_res[(df_res.name.isin(abla_4)) & (df_res.n_leads==12) & (~df_res.folder.str.contains("nas"))].reset_index(drop=True) df_abla4["ordering"] = df_abla4.name.apply(lambda s: abla_4.index(s)) df_abla4 = df_abla4.sort_values(by="ordering").reset_index(drop=True) df_abla4 # + fig, ax = plt.subplots(figsize=(16,12)) belt1, belt2 = np.full((len(abla_4),50), np.nan), np.full((len(abla_4),50), np.nan) _train_cm = pd.DataFrame() for idx, row in df_abla4.iterrows(): train_cm = row.train_cm if len(train_cm) > len(_train_cm): _train_cm = train_cm name 
= row["name"].replace("-1linear-zscore-mixup-asymmetric-onecycle", "") ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label="train-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt1[idx,:len(train_cm.challenge_metric)] = train_cm.challenge_metric.values belt1 = belt1[...,:len(_train_cm)] belt2 = belt2[...,:len(_train_cm)] for idx, row in df_abla4.iterrows(): val_cm = row.val_cm name = row["name"].replace("-1linear-zscore-mixup-asymmetric-onecycle", "") ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label="val-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt2[idx,:len(val_cm.challenge_metric)] = val_cm.challenge_metric.values ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="lower right", ncol=2) ax.set_ylim(0.26,1.01) fig.tight_layout() plt.savefig("./images/abla4_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla4_cm.svg", dpi=1200, bbox_inches="tight") # + # abla5 leads 12-2, resnet-nc-se, lstm, se, 2-linear, asymmetric loss, one cycle 1e-4 to 2e-3 # - from mpl_toolkits.axes_grid1.inset_locator import inset_axes df_abla5 = df_res[(df_res.n_leads<12) | (df_res.name == 'resnet_nature_comm_se-lstm-se-2linear-zscore-mixup-asymmetric-onecycle')].reset_index(drop=True) df_abla5 = df_abla5.sort_values("n_leads", ascending=False).reset_index(drop=True) df_abla5 # + import matplotlib.patches as patches from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset, InsetPosition max_cm = {} fig, ax = plt.subplots(figsize=(16,12)) belt1, belt2 = np.full((len(df_abla5),50), np.nan), np.full((len(df_abla5),50), np.nan) _train_cm = pd.DataFrame() for idx, row in df_abla5.iterrows(): train_cm = row.train_cm if len(train_cm) > len(_train_cm): _train_cm = train_cm ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label=f"train-{row.n_leads}-leads", c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt1[idx,:len(train_cm.challenge_metric)] = train_cm.challenge_metric.values for idx, row in df_abla5.iterrows(): val_cm = row.val_cm name = row["name"].replace("-1linear-zscore-mixup-asymmetric-onecycle", "") ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label=f"val-{row.n_leads}-leads", c=colors[idx], marker=markers[idx], markersize=marker_size, ) max_cm[row.n_leads] = val_cm.challenge_metric.max() belt2[idx,:len(val_cm.challenge_metric)] = val_cm.challenge_metric.values ax.fill_between(_train_cm.index.values*20, np.nanmean(belt1,axis=0)-0.042, np.nanmean(belt1,axis=0)+0.042, color='r', alpha=.15) ax.fill_between(_train_cm.index.values*20, np.nanmean(belt2,axis=0)-0.03, np.nanmean(belt2,axis=0)+0.03, color='g', alpha=.2) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="upper left", ncol=2) ax.set_ylim(0.2,1.05) rect = patches.Rectangle((42000, 0.62), 14200, 0.08, facecolor="none", edgecolor="black", ls="-", lw=3) ax.add_patch(rect) axin = inset_axes( ax, width="35%", height="30%", loc=4, borderpad=3, ) axin.plot([max_cm[l] for l in df_abla5.n_leads.values], color="black", marker="o", markersize=8) axin.set_ylim(0.62,0.72) axin.set_xlabel("Lead-Set", fontsize=22) axin.set_xticks(np.arange(len(df_abla5.n_leads.values))) axin.set_xticklabels([f"{l}-leads" for l in df_abla5.n_leads.values]) axin.tick_params(axis = 'both', labelsize = 16) axin.set_ylabel("Challenge Metric (n.u.)", fontsize=22) axin.grid() # axin.tick_params(axis = 'y', rotation = 
45) axin.plot((2), max_cm[4], marker='o', markersize=10, color='r', ) axin.plot((3), max_cm[3], marker='o', markersize=10, color='r', ) for idx, l in enumerate(df_abla5.n_leads.values): c = "black" if idx not in [2,3] else "red" axin.text(idx-0.2, max_cm[l]+0.004, f"{max_cm[l]:.3f}", fontsize=18, color=c) rect = patches.Rectangle((28800, 0.22), 27200, 0.335, facecolor="none", edgecolor="black", ls="-", lw=3) ax.add_patch(rect) ax.annotate('', xytext=(49000, 0.62), xy=(46000, 0.552), arrowprops=dict(facecolor='black', shrink=0.06),) fig.tight_layout() plt.savefig("./images/abla5_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla5_cm.svg", dpi=1200, bbox_inches="tight") # - # ## new ablation (NAS) result df_new_abla = df_res[df_res.folder.str.contains("nas")].reset_index(drop=True) df_new_abla["name"] = df_new_abla["name"].apply(lambda s: s.replace("resnet_nature_comm_bottle_neck_se-", "").replace("-zscore-label_smooth-mixup-asymmetric-onecycle", "")) df_new_abla = df_new_abla.sort_values("val_cm_max", ascending=False).reset_index(drop=True) df_new_abla = df_new_abla.drop(["folder", "n_leads"], axis=1) df_new_abla # + fig, ax = plt.subplots(figsize=(16,12)) belt1, belt2 = np.full((len(df_new_abla),50), np.nan), np.full((len(df_new_abla),50), np.nan) _train_cm = pd.DataFrame() for idx, row in df_new_abla.iterrows(): train_cm = row.train_cm if len(train_cm) > len(_train_cm): _train_cm = train_cm name = row["name"].replace("-zscore-mixup-asymmetric-onecycle", "") ax.plot( train_cm.index.values*20, train_cm.challenge_metric, label="train-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt1[idx,:len(train_cm.challenge_metric)] = train_cm.challenge_metric.values belt1 = belt1[...,:len(_train_cm)] belt2 = belt2[...,:len(_train_cm)] for idx, row in df_new_abla.iterrows(): val_cm = row.val_cm name = row["name"].replace("-zscore-mixup-asymmetric-onecycle", "") ax.plot( val_cm.index.values*20, val_cm.challenge_metric, ls='--', label="val-"+name_map.get(name,name), c=colors[idx], marker=markers[idx], markersize=marker_size, ) belt2[idx,:len(val_cm.challenge_metric)] = val_cm.challenge_metric.values ax.fill_between(_train_cm.index.values*20, np.nanmean(belt1,axis=0)-0.06, np.nanmean(belt1,axis=0)+0.06, color='r', alpha=.15) ax.fill_between(_train_cm.index.values*20, np.nanmean(belt2,axis=0)-0.02, np.nanmean(belt2,axis=0)+0.02, color='g', alpha=.2) ax.set_xlabel("Steps (n.u.)") ax.set_ylabel("Challenge Metric (n.u.)") ax.legend(loc="lower right", ncol=2) ax.set_ylim(0.23,1) fig.tight_layout() plt.savefig("./images/abla6_cm.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/abla6_cm.svg", dpi=1200, bbox_inches="tight") # - df_compare = df_res[(df_res.n_leads==12)&(df_res.rnn_name=="none")&(df_res.attn_name=="none")&(df_res.n_linear==1)&(df_res.name.str.endswith("-zscore-mixup-asymmetric-onecycle"))].reset_index(drop=True) df_compare df_compare = df_compare.iloc[[0,1,3,4,5,6,7,8]].reset_index(drop=True) df_compare = df_compare.sort_values("size",ascending=True).reset_index(drop=True) # + fig, ax = plt.subplots(figsize=(16,12)) # ax.set_xscale("log") df_tmp = df_compare[df_compare.cnn_name.str.startswith("resnet_nature")] ax.plot(df_tmp["size"], df_tmp.val_cm_max, c=colors[0]) for _, row in df_tmp.iterrows(): ax.plot(row["size"], row.val_cm_max, marker='o', markersize=18, c=colors[0]) if row.cnn_name == "resnet_nature_comm_se": x,y = row["size"]+0.2, row.val_cm_max-0.001 elif row.cnn_name == "resnet_nature_comm_bottle_neck_se": x,y = 
row["size"]-0.9, row.val_cm_max+0.002 elif row.cnn_name == "resnet_nature_comm_bottle_neck_gc": x,y = row["size"]+0.2, row.val_cm_max-0.002 ax.text(x,y, name_map[row.cnn_name],c=colors[0], fontsize=21 ) df_tmp = df_compare[df_compare.cnn_name.str.startswith("tresnet")] ax.plot(df_tmp["size"], df_tmp.val_cm_max, c=colors[1]) for _, row in df_tmp.iterrows(): ax.plot(row["size"], row.val_cm_max, marker='o', markersize=18, c=colors[1]) if row.cnn_name == "tresnetN": x,y = row["size"]-1.2, row.val_cm_max+0.0025 else: x,y = row["size"]+0.3, row.val_cm_max-0.003 ax.text(x,y, name_map[row.cnn_name],c=colors[1], fontsize=21 ) df_tmp = df_compare[df_compare.cnn_name.str.startswith("multi")] ax.plot(df_tmp["size"], df_tmp.val_cm_max, c=colors[2]) for _, row in df_tmp.iterrows(): ax.plot(row["size"], row.val_cm_max, marker='o', markersize=18, c=colors[2]) if row.cnn_name == "multi_scopic": x,y = row["size"]-0.4, row.val_cm_max-0.006 else: x,y = row["size"]-0.9, row.val_cm_max+0.003 ax.text(x,y, name_map[row.cnn_name],c=colors[2], fontsize=21 ) ax.set_ylim(0.62, 0.73) ax.set_xlabel("# Params (million)") ax.set_ylabel("Challenge Metric (n.u.)") fig.tight_layout() plt.savefig("./images/nn_compare.pdf", dpi=1200, bbox_inches="tight") plt.savefig("./images/nn_compare.svg", dpi=1200, bbox_inches="tight") # - # + train_config = deepcopy(TrainCfg_ns) train_config.rnn_name = "lstm" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") train_config.n_leads = len(train_config.leads) tranches = train_config.tranches_for_training if tranches: classes = train_config.tranche_classes[tranches] else: classes = train_config.classes if train_config.n_leads == 12: model_config = deepcopy(ModelCfg_ns.twelve_leads) elif train_config.n_leads == 6: model_config = deepcopy(ModelCfg_ns.six_leads) elif train_config.n_leads == 4: model_config = deepcopy(ModelCfg_ns.four_leads) elif train_config.n_leads == 3: model_config = deepcopy(ModelCfg_ns.three_leads) elif train_config.n_leads == 2: model_config = deepcopy(ModelCfg_ns.two_leads) model_config.cnn.name = train_config.cnn_name model_config.rnn.name = train_config.rnn_name model_config.attn.name = train_config.attn_name # - l_cnn = [ "resnet_nature_comm_se", "resnet_nature_comm_bottle_neck_se", "tresnetN", "tresnetP", "tresnetF", "multi_scopic", "multi_scopic_leadwise", ] # + ms = {} for name in l_cnn: model_config.cnn.name = name model = ECG_CRNN_CINC2021( classes=train_config.classes, n_leads=train_config.n_leads, config=model_config, ) ms[name] = model.cnn.module_size # - df_table = df_res[df_res.name.str.contains("-zscore-mixup-asymmetric-onecycle")][["name", "n_leads", "size", "size_h", "val_cm_max", "speed", "inf_speed", "efficiency"]].reset_index(drop=True) df_table.name = df_table.name.apply(lambda s: s.replace("-zscore-mixup-asymmetric-onecycle", "")) df_table[df_table.name.str.startswith("resnet_nature_comm_se")].reset_index(drop=True) # ## special detectors import json, gzip with gzip.open("./results/special_detector_test_results_xqrs.json.gz", "r") as f: content = json.load(f) cm_sd = ED() for c in ["LAD", "RAD", "PR", "LQRSV"]: cm_sd[c] = ED() cm_sd[c].tp = 0 cm_sd[c].fp = 0 cm_sd[c].fn = 0 cm_sd[c].tn = 0 for item in content["twelve_leads"]: if c in item["label"]: if c in item["pred"]: cm_sd[c].tp += 1 else: cm_sd[c].fn += 1 else: if c in item["pred"]: cm_sd[c].fp += 1 else: cm_sd[c].tn += 1 c = "brady" cm_sd[c] = ED() cm_sd[c].tp = 0 cm_sd[c].fp = 0 cm_sd[c].fn = 0 cm_sd[c].tn = 0 for item in content["twelve_leads"]: if "Brady" in item["label"] or 
"SB" in item["label"]: if c in item["pred"]: cm_sd[c].tp += 1 else: cm_sd[c].fn += 1 else: if c in item["pred"]: cm_sd[c].fp += 1 else: cm_sd[c].tn += 1 cm_sd [item for item in content["twelve_leads"] if "LAD" in item["pred"]] ds_train.reader.get_labels() ds_train.reader.plot( "A0226", # leads=["I", "II", "III", "aVR", "aVL", "aVF",], leads = ["I", "II", "aVF",], ticks_granularity=2, save_path="./working_dir/A0226.pdf" ) np.ones((12,)) np.maximum(0.5, np.ones((12,))) df_res df_sota = df_res[(df_res.cnn_name.str.contains("resnet_nature_comm_bottle_neck")) & (df_res.n_leads==12)].reset_index(drop=True) df_sota.name.tolist() df_sota ds_train.reader.df_stats from utils.scoring_aux_data import dx_mapping_scored, equiv_class_dict dx_mapping_scored = dx_mapping_scored.sort_values("Total", ascending=False).reset_index(drop=True) dx_mapping_scored # + fig, ax = plt.subplots(figsize=(12,16)) y_pos = np.arange(len(dx_mapping_scored)) ax.barh(y_pos, dx_mapping_scored.Total, align='center') ax.set_yticks(y_pos) ax.set_yticklabels(dx_mapping_scored.Abbreviation, fontsize=22) ax.invert_yaxis() # labels read top-to-bottom ax.xaxis.set_tick_params(labelsize=22) ax.set_xlabel('# records (n.u.)', fontsize=32) ax.set_xlim(0,32000) # ax.grid() for idx, row in dx_mapping_scored.iterrows(): ax.text(row.Total, idx+0.23, row.Total, fontsize=20) plt.savefig("./images/scored_classes_distribution.svg", dpi=1200, bbox_inches="tight", transparent=False) plt.savefig("./images/scored_classes_distribution.pdf", dpi=1200, bbox_inches="tight", transparent=False) # - for idx, row in dx_mapping_scored.iterrows(): print(f"{row.SNOMEDCTCode} & {row.Abbreviation} & {row.Dx.title()} \\\\") equiv_class_dict # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.016151, "end_time": "2020-10-25T05:33:25.665485", "exception": false, "start_time": "2020-10-25T05:33:25.649334", "status": "completed"} tags=[] # ## Parameter # + papermill={"duration": 0.014613, "end_time": "2020-10-25T05:33:25.690669", "exception": false, "start_time": "2020-10-25T05:33:25.676056", "status": "completed"} tags=["parameters"] # These parameters can be injected from Papermill model_type = "pre_ln" train_file = "wikitext-103-raw/wiki.train.raw" valid_file = "wikitext-103-raw/wiki.valid.raw" epochs = 10 batch_size = 2 max_learning_rate = 1e-4 warmup_steps = 0 save_model_dir = "tfchat_model" clipnorm = 1.0 fp16 = False # - # Assert parameters assert model_type in ["pre_ln", "post_ln", "min_gpt", "transformers"] # ## Installation # !apt install -y git # !pip install git+https://github.com/noriyukipy/tfchat@ad516f2 # + [markdown] papermill={"duration": 0.010568, "end_time": "2020-10-25T05:33:25.736850", "exception": false, "start_time": "2020-10-25T05:33:25.726282", "status": "completed"} tags=[] # ## Configure GPU # + papermill={"duration": 0.920123, "end_time": "2020-10-25T05:33:26.667484", "exception": false, "start_time": "2020-10-25T05:33:25.747361", "status": "completed"} tags=[] from tfchat.utils import set_memory_growth from tfchat.utils import set_mixed_precision_policy # + papermill={"duration": 0.55073, "end_time": "2020-10-25T05:33:27.228969", "exception": false, "start_time": "2020-10-25T05:33:26.678239", "status": "completed"} tags=[] set_memory_growth() # - if fp16: set_mixed_precision_policy() # + [markdown] papermill={"duration": 0.010916, 
"end_time": "2020-10-25T05:33:27.251283", "exception": false, "start_time": "2020-10-25T05:33:27.240367", "status": "completed"} tags=[] # ## Setup tokenizer # + papermill={"duration": 1.424651, "end_time": "2020-10-25T05:33:28.686639", "exception": false, "start_time": "2020-10-25T05:33:27.261988", "status": "completed"} tags=[] # Install transformers by HuggingFace to use GPT2 tokenizer # ! pip install transformers==3.4.0 # Enable widgetsnbextention to avoid the following error when running GPT2.from_pretrained method # ImportError: IProgress not found. Please update jupyter and ipywidgets. # ! jupyter nbextension enable --py widgetsnbextension # + papermill={"duration": 1.701922, "end_time": "2020-10-25T05:33:30.435314", "exception": false, "start_time": "2020-10-25T05:33:28.733392", "status": "completed"} tags=[] # setup tokenizer from transformers import GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # + [markdown] papermill={"duration": 0.012068, "end_time": "2020-10-25T05:33:30.459683", "exception": false, "start_time": "2020-10-25T05:33:30.447615", "status": "completed"} tags=[] # ## Prepare model config # + papermill={"duration": 0.084744, "end_time": "2020-10-25T05:33:30.556263", "exception": false, "start_time": "2020-10-25T05:33:30.471519", "status": "completed"} tags=[] from tfchat.configs import GPT2SmallConfig config = GPT2SmallConfig() # Set the larger number of vocab size than 33,278, which is the vocab size of Wikitext-2 config.vocab_size = tokenizer.vocab_size # + papermill={"duration": 0.020325, "end_time": "2020-10-25T05:33:30.588878", "exception": false, "start_time": "2020-10-25T05:33:30.568553", "status": "completed"} tags=[] config # + [markdown] papermill={"duration": 0.012416, "end_time": "2020-10-25T05:33:30.613934", "exception": false, "start_time": "2020-10-25T05:33:30.601518", "status": "completed"} tags=[] # ## Prepare Dataset # + papermill={"duration": 0.017599, "end_time": "2020-10-25T05:33:30.643992", "exception": false, "start_time": "2020-10-25T05:33:30.626393", "status": "completed"} tags=[] from pathlib import Path from urllib.request import urlretrieve import zipfile import numpy as np def encode_file(_tokenizer, _filepath): ids = [] with open(_filepath) as f: for line in f.readlines(): text = line.strip("\n") ids.extend(_tokenizer.encode(text)) return np.array(ids, dtype=np.int32) # + papermill={"duration": 12.24382, "end_time": "2020-10-25T05:33:42.900304", "exception": false, "start_time": "2020-10-25T05:33:30.656484", "status": "completed"} tags=[] train_ids = encode_file(tokenizer, train_file) valid_ids = encode_file(tokenizer, valid_file) # + papermill={"duration": 0.017183, "end_time": "2020-10-25T05:33:42.930359", "exception": false, "start_time": "2020-10-25T05:33:42.913176", "status": "completed"} tags=[] print("Train:", train_ids.shape) print("Valid:", valid_ids.shape) # + papermill={"duration": 0.016829, "end_time": "2020-10-25T05:33:42.959985", "exception": false, "start_time": "2020-10-25T05:33:42.943156", "status": "completed"} tags=[] print(train_ids.shape) print(valid_ids.shape) # + papermill={"duration": 0.484628, "end_time": "2020-10-25T05:33:43.457723", "exception": false, "start_time": "2020-10-25T05:33:42.973095", "status": "completed"} tags=[] from tfchat.data import BlockDataset dataset = BlockDataset(block_size=config.context_size, batch_size=batch_size) train_dataset = dataset.build(train_ids, shuffle=True) valid_dataset = dataset.build(valid_ids, shuffle=False) # + papermill={"duration": 0.017, "end_time": 
"2020-10-25T05:33:43.488196", "exception": false, "start_time": "2020-10-25T05:33:43.471196", "status": "completed"} tags=[] num_train_steps = len([_ for _ in train_dataset]) num_valid_steps = len([_ for _ in valid_dataset]) print("Train steps:", num_train_steps) print("Valid steps:", num_valid_steps) # - # ## Transformers model implementation from transformers import TFGPT2LMHeadModel from transformers import GPT2Config import tensorflow.keras as keras import tensorflow as tf from tfchat.models import create_combined_mask # + class TransformersGPT2(keras.Model): def __init__(self, config): super().__init__() tf_config = GPT2Config( n_layers=config.num_layers, n_embd=config.d_model, n_head=config.num_heads, n_inner=config.d_ff, vocab_size=config.vocab_size, n_ctx=config.context_size, n_positions=config.context_size, attn_pdrop=config.attention_dropout_rate, resid_pdrop=config.residual_dropout_rate, embd_pdrop=config.embedding_dropout_rate, layer_norm_epsilon=config.epsilon, activation_function="gelu_new", # Default value of transformers implementation ) self._decoder = TFGPT2LMHeadModel(tf_config) def call(self, inputs, training): inputs = tf.cast(inputs, tf.int32) x = self._decoder(inputs, training=training) return x[0] # + [markdown] papermill={"duration": 0.013209, "end_time": "2020-10-25T05:33:43.514599", "exception": false, "start_time": "2020-10-25T05:33:43.501390", "status": "completed"} tags=[] # ## Prepare Model # + papermill={"duration": 0.019812, "end_time": "2020-10-25T05:33:43.547540", "exception": false, "start_time": "2020-10-25T05:33:43.527728", "status": "completed"} tags=[] from tfchat.losses import PaddingLoss from tfchat.schedules import WarmupLinearDecay import tensorflow.keras as keras def train(_model, _train_dataset, _valid_dataset, _epochs, _warmup_steps, _num_train_steps, _max_learning_rate, _clipnorm): schedule = WarmupLinearDecay(max_learning_rate=_max_learning_rate, warmup_steps=_warmup_steps, training_steps=_num_train_steps*_epochs) optimizer = keras.optimizers.Adam(schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-8, clipnorm=_clipnorm) _model.compile(loss=PaddingLoss(), optimizer=optimizer) history = _model.fit( _train_dataset, validation_data=_valid_dataset, epochs=_epochs, callbacks=[ keras.callbacks.EarlyStopping(patience=1, restore_best_weights=True), # If you want to save chekcpoints, remove the next comment out #keras.callbacks.ModelCheckpoint("keras_model/", save_best_only=True) ], verbose=2, ) # - if model_type == "pre_ln": from tfchat.models import PreLNDecoder model = PreLNDecoder(config) elif model_type == "post_ln": from tfchat.models import PostLNDecoder model = PostLNDecoder(config) elif model_type == "transformers": model = TransformersGPT2(config) elif model_type == "min_gpt": from mingpt.model import GPT, GPTConfig mconf = GPTConfig(config.vocab_size, config.context_size, n_layer=config.num_layers, n_head=config.num_heads, n_embd=config.d_model) model = GPT(mconf) else: raise Exception("Model type is wrong") model.build(input_shape=(None, config.context_size)) model.summary() # + papermill={"duration": 535.285785, "end_time": "2020-10-25T05:42:39.126238", "exception": false, "start_time": "2020-10-25T05:33:43.840453", "status": "completed"} tags=[] train(model, train_dataset, valid_dataset, epochs, warmup_steps, num_train_steps, max_learning_rate, clipnorm) # + from tfchat.eval import perplexity print("Validation PPL:", perplexity(model, valid_dataset)) # + from tfchat.utils import save_model save_model(save_model_dir, model, config) # - # --- 
# jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Time: O(n * s) where s -> sum of nums # Space: O(n * s) def subset_partition(nums): n = len(nums) s = sum(nums) if s % 2: return False target = s // 2 dp = [False] * (target + 1) dp[0] = True for i in range(n): dp2 = [True] for j in range(1, target + 1): if j < nums[i]: dp2.append(dp[j]) else: dp2.append(dp[j] or dp[j - nums[i]]) dp = dp2 return dp[-1] if __name__=='__main__': print(subset_partition([2, 3, 5, 6])) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Ubuntu Linux) # language: python # metadata: # cocalc: # description: Python 3 programming language # priority: 100 # url: https://www.python.org/ # name: python3 # --- # Rectangle - Neutral Axis in Middle - Rev 0.4 # **Abstract:** # Given: breadth or width b and depth or height d and units of each # Returns: Area, Section Modulus, Plastic Modulus, Second Moment of Inertia, Radius of Gyration, and distance to centroid # # **Instructions:** # Enter breadth, depth and units below the following code cell: # + deletable=false editable=false jupyter={"source_hidden": true} from IPython.display import Image Image( filename="./Rectangle_with_Neutral_Axis_in_centre.JPG", embed=True ) # + deletable=false editable=true jupyter={"source_hidden": false} # Setup units from pint import UnitRegistry unit = UnitRegistry() quantity = unit.Quantity unit.default_format = '~' # ~ for unit abreviations, P for pretty print, or both # Define symbols for common units m = unit.meter; mm = unit.millimeter; cm = unit.centimeter; inch = unit.inch; ft = unit.foot; unit_choices = ['mm', 'cm', 'm', 'inch', 'ft',] kN = unit.kilonewton; MPa = unit.megapascal; psi = unit.psi # Setup widgets for interactivity from ipywidgets import interact, interactive, fixed, interact_manual, HBox, Label import ipywidgets as widgets from IPython.display import display widget_b_label = widgets.Label('Width or breadth, $b$:') widget_b = widgets.FloatSlider( value = 89.0, min = 0.0, max = 99999, step = 0.001, disabled = False, ) widget_b_units = widgets.Dropdown( options = unit_choices, value = 'mm', description = 'Unit', disabled = False, ) widget_d_label = widgets.Label('Height or depth, $d$:') widget_d = widgets.FloatSlider( value = 89.0, min = 0.0, max = 99999, step = 0.001, disabled = False, ) widget_d_units = widgets.Dropdown( options = unit_choices, value = 'mm', description = 'Unit', disabled = False, ) widget_output_label = widgets.Label('Desired unit for results:') widget_output_units = widgets.Dropdown( options = unit_choices, value = 'mm', description = 'Unit', disabled = False, ) # Display instructions and widgets to user print('Enter dimensions and units below.') print('Note: Press ENTER after changing value in floating point text boxes to register changes.') display( HBox([widget_b_label, widget_b, widget_b_units, ]), HBox([widget_d_label, widget_d, widget_d_units, ]), HBox([widget_output_label, widget_output_units]), ) print('') print('NOTE: AFTER entering dimensions and units above,', 'FIRST, CLICK on the cell directly below this,\n', 'THEN--using Menus above--\n', 'CLICK "Cell/Run all Below" to update the Notebook') # + jupyter={"source_hidden": true} b = quantity(widget_b.value, widget_b_units.value) d = 
quantity(widget_d.value, widget_d_units.value) # Convert b and d to desired output units b.ito(widget_output_units.value) d.ito(widget_output_units.value) # Derive Geometric Properties - See bottom of notebook for pretty formulas A = b*d Sx = (b*d**2)/6 Sy = (d*b**2)/6 Zx = (b*d**2)/4 Zy = (d*b**2)/4 Ix = (b*d**3)/12 Iy = (d*b**3)/12 rx = d/(12**0.5) ry = b/(12**0.5) cx = d/2 cy = b/2 # Define Output print('Given:') print(' Width or breadth, b = {0:n} {1} and'.format(b.magnitude, b.units)) print(' Height or depth, d = {0:n} {1}'.format(d.magnitude, d.units)) print('') print('Geometric Properties:') print(' Area, A = {0:n} {1}'.format(A.magnitude, A.units)) print(' Major Elastic Section Modulus, Sx = {0:n} {1}'.format(Sx.magnitude, Sx.units)) print(' Minor Elastic Section Modulus, Sy = {0:n} {1}'.format(Sy.magnitude, Sy.units)) print(' Major Plastic Section Modulus, Zx = {0:n} {1}'.format(Zx.magnitude, Zx.units)) print(' Minor Plastic Section Modulus, Zy = {0:n} {1}'.format(Zy.magnitude, Zy.units)) print(' Major Second Moment of Inertia, Ix = {0:n} {1}'.format(Ix.magnitude, Ix.units)) print(' Minor Second Moment of Inertia, Iy = {0:n} {1}'.format(Iy.magnitude, Iy.units)) print(' Major Radius of Gyration, rx = {0:n} {1}'.format(rx.magnitude, rx.units)) print(' Minor Radius of Gyration, ry = {0:n} {1}'.format(ry.magnitude, ry.units)) print(' Distance from Major Axis to Extreme Fibre, cx = {0:n} {1}'.format( cx.magnitude, cx.units)) print(' Distance from Minor Axis to Extreme Fibre, cy = {0:n} {1}'.format( cy.magnitude, cy.units)) # - # **Formulas:** # # Area, $A = bd$ # Elastic Section Modulus, $S_x = bd^2/6$, $S_y = db^2/6$ # Plastic Section Modulus, $Z_x = bd^2/4$, $Z_y = db^2/4$ # Second Moment of Inertia, $I_x = bd^3/12$, $I_y = db^3/12$ # Radius of Gyration, $r_x = d/\sqrt12$, $r_y = b/\sqrt12$ # Distance from bottom left corner to centroid, $c_x = d/2$, $c_y = b/2$ # **Revision History:** # - Rev 0.4 23-Jun-2019 E.Durham Added widgets for input of dimensions and units and graphic # - Rev 0.3 19-Jun-2019 E.Durham Added units # - Rev 0.2 18-Jun-2019 E.Durham Added Plastic Section Modulus and revised formatting # - Rev 0.0 28-Mar-2019 E.Durham Created notebook using Figure 11.4 from Wood Design Manual 2017 # **ToDo / Issues:** # - Add interaction to formulas so that values are immediately updated as user updates dimensions and units so that the user does not need to re-run each of the cells to get results # - Add demonstration of symPy package # - Floating point text boxes do not show more than 2 digits # - Add autoscaling or right-sizing to rounding function and then results # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="7P_ERPToLUi2" # Vector Space Operations # + colab={"base_uri": "https://localhost:8080/"} id="Q3n1y3yDKrDu" outputId="13c7337c-7f33-4953-efcd-f47c7955d63e" import numpy as np A = np.array ([4,3]) B = np.array ([2,-5]) print('Vector A is', A) print('Vector B is', B) # + colab={"base_uri": "https://localhost:8080/"} id="s5xRDmVTS_-j" outputId="bdf90bfe-f550-44b1-8ad3-2ff520093b9e" import numpy as np ball1 = np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) pool.shape # + colab={"base_uri": "https://localhost:8080/"} id="8NjOdifnTFQD" outputId="b09c7353-fe15-47d8-9f37-b537cb55a83c" import numpy as np ball1 = 
np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) print(pool.shape) print(pool.size) # + colab={"base_uri": "https://localhost:8080/"} id="akgGG6w7THrr" outputId="8b54cd85-6df1-40d9-da58-2d4c61409186" import numpy as np ball1 = np.array ([1,2,3]) ball2 = np.array ([0,1,-1]) pool = np.array ([ball1,ball2]) pool.shape pool.ndim # + [markdown] id="tcbN7mppTRIH" # Vector Addition # # + colab={"base_uri": "https://localhost:8080/"} id="eLfRihJlTTYb" outputId="967169a2-ea95-42f7-e272-fe37928f2e54" import numpy as np A = np.array ([4,3]) B = np.array ([2,-5]) print('Vector A is', A) print('Vector B is', B) # + colab={"base_uri": "https://localhost:8080/"} id="W0bn7uuFTWWs" outputId="9d0b5edf-e96b-4255-bc8d-1dd05c4fa846" R = np.add(A,B) R # + [markdown] id="t0DXA876TggD" # Vector Subtraction # + colab={"base_uri": "https://localhost:8080/"} id="s1AJoVwXTdLy" outputId="9066d9fd-1d5a-47a7-a174-66bdf029e1d9" import numpy as np A = np.array ([4,3]) B = np.array ([2,-5]) print('Vector A is', A) print('Vector B is', B) # + colab={"base_uri": "https://localhost:8080/"} id="16J7skjUTkvb" outputId="84ac00e5-1089-4b09-a9c7-6476a8600aac" R = np.subtract(A,B) R # + [markdown] id="btvoW8s1ToZy" # Vector Scaling # + colab={"base_uri": "https://localhost:8080/"} id="IG37IIpJTqgL" outputId="117aad77-2f6d-45f0-b9e6-15c4eee9c253" A = np.array([1,5,8,9]) S = 5*A S # + colab={"base_uri": "https://localhost:8080/"} id="Y3Y8MnAmTs-U" outputId="734ce588-74dd-480a-e3bb-f1f7368c485a" A = np.array([1,5,8,9]) S = np.multiply(5,A) S # + [markdown] id="eZMVXp2XTusL" # Vector Cross Product # + colab={"base_uri": "https://localhost:8080/"} id="QnW8hAuVTxQn" outputId="5cf91521-8a2a-45aa-beb9-ecb1a168d494" import numpy as np A = np.array([2,3]) B = np.array([1,7]) output = np.cross(A,B) print(output) # + colab={"base_uri": "https://localhost:8080/"} id="b1Uq40IlT8sU" outputId="81b0cf37-658a-4f34-cad0-12b61ed226b7" A = np.array([2,3]) B = np.array([1,7]) cross = np.cross(A,B) print(cross) # + colab={"base_uri": "https://localhost:8080/"} id="Ao2bze1BT-sy" outputId="27713fff-92f9-4bb1-8181-8c010deb26b0" A = np.array([2,3,4]) B = np.array([1,7,0]) cross = np.cross(A,B) print(cross) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from scipy.optimize import minimize # # Optimize multivariable function using constrained optimization # Based on: https://www.youtube.com/watch?v=cXHvC_FGx24 # Let's minimize: # # $$min \ x_1 x_4 (x_1 + x_2 + x_3) + x_3$$ # # with the following constraints: # # * $x_1 x_2 x_3 x_4 \geq 40$ # # # * $x_1^2 + x_2^2 + x_3^2 + x_4^2 = 40$ # # # * $1 \leq x_1,\ x_2,\ x_3,\ x_4 \leq 5 $ # # # Let's define our initial values as # # $$X_0 = (1, 5, 5, 1)$$ # Define init vals X = np.array([1, 5, 5, 1]) # Define objective function def objective(x): return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2] # Sanity check objective(X) # + # Define constraint 1 (inequality) def constr_1(x): return x[0] * x[1] * x[2] * x[3] - 25 # Define constraint 2 (equality) def constr_2(x): return sum(x**2) - 40 # - # Sanity check constr_2(X) # Setup bounds for variables bound = (1, 5) bounds = tuple([bound]*4) # + # Specify additional information on constraints con_1 = {'type': 'ineq', 'fun' : constr_1 } con_2 = {'type': 'eq', 'fun' : constr_2 } cons = [con_1, con_2] # - # **NOTE**: Inequality `'ineq'` constraints always take 
form of `x >= n` # # https://stackoverflow.com/questions/42303470/scipy-optimize-inequality-constraint-which-side-of-the-inequality-is-considere # # https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html # Define a solution solution = minimize(objective, X, method = 'SLSQP', bounds = bounds, constraints = cons) # Examine solution print(solution) objective(solution.x) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import keras from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import pandas as pd import numpy as np import xgboost as xgb data= pd.read_csv("C://Users///Desktop//.ipynb_checkpoints//Training Data.csv") data.head() data.info() from scipy import stats # + # df.drop('EtCO2',axis=1,inplace=True) # + #df.drop('bekaar',axis=1,inplace=True) # - F,p=stats.f_oneway(df) ### anova from pylab import rcParams missing = (data.isnull().sum() / data.shape[0]) * 100 missing # + rcParams['figure.figsize'] = 14, 5 _ = plt.bar(missing.index, missing) _ = plt.xticks(missing.index, rotation=90) plt.title("missing data percentage") # + selected_cols = list(missing[missing < 90].index) selected_cols # + fdata = data[selected_cols] fdata.head() # - categorical = fdata.dtypes == object categorical['Unit1'] = True categorical['Unit2'] = True categorical['Gender'] = True cat_vars = list(categorical[categorical].index) cont_vars = list(categorical[~categorical].index) cont_vars.pop(-1) import seaborn as sns rcParams['figure.figsize'] = 5, 5 for i,var in enumerate(cont_vars): plt.figure(i) plt.title("Density plot of " + str(var)) sns.distplot(fdata[var].dropna(), axlabel=var); x = np.linspace(start=-10, stop=10, num=1000) y = stats.norm.pdf(x, loc=0, scale=1.5) plt.plot(x, y) data1=data.HR sns.distplot(data1, bins=20, kde=False) data1=data.Bilirubin_direct sns.distplot(data1, bins=20, kde=False) data1=data.O2Sat sns.distplot(data1, bins=20, kde=False) class Gaussian: "Model univariate Gaussian" def __init__(self, mu, sigma): self.mu = mu self.sigma = sigma def pdf(self, datum): "Probability of a data point given the current parameters" u = (datum - self.mu) / abs(self.sigma) y = (1 / (sqrt(2 * pi) * abs(self.sigma))) * exp(-u * u / 2) return y def __repr__(self): return 'Gaussian({0:4.6}, {1:4.6})'.format(self.mu, self.sigma) data1=data.HR best_single = Gaussian(np.mean(data1), np.std(data1)) print('Best single Gaussian: μ = {:.2}, σ = {:.2}'.format(best_single.mu, best_single.sigma)) x = np.linspace(-6, 8, 200) g_single = stats.norm(best_single.mu, best_single.sigma).pdf(x) sns.distplot(data1, bins=20, kde=False, norm_hist=True) plt.plot(x, g_single, label='single gaussian') plt.legend(); fdata.loc[(fdata['Temp'] >= 36.4) & (fdata['Temp'] < 37.6),'data'] = 'sahi' fdata.loc[(fdata['Temp'] < 36.4) | (fdata['Temp'] >= 37.6),'tapmaan'] = 'kharabh' fdata['tapmaan'].fillna('ptanhi', inplace=True) # + fdata.loc[(fdata['HR'] >= 100) & (fdata['Age'] >= 10 ),'dhadhkan'] = 'kharabh' fdata.loc[(data['HR'] < 100) & (fdata['HR'] > 60) & (fdata['Age'] >= 10 ),'dhadhkan'] = 'sahi' fdata.loc[(data['HR'] >= 70) & (fdata['HR'] < 190) & (fdata['Age'] < 10 ),'dhadhkan'] = 'sahi' fdata.loc[((fdata['HR'] < 70) | (fdata['HR'] >= 190)) & (fdata['Age'] < 10 ),'dhadhkan'] = 'kharabh' fdata['dhadhkan'].fillna('ptanhi', inplace=True) # - fdata.head() fdata.loc[(fdata['O2Sat'] >= 90) & (fdata['O2Sat'] < 
100),'o2stat'] = 'sahi' fdata.loc[(fdata['O2Sat'] < 90) & (fdata['O2Sat'] >= 0),'o2stat'] = 'kharabh' fdata['o2stat'].fillna('ptanhi', inplace=True) # + fdata.head() # - fdata.loc[(fdata['Resp'].between(30,60)) & (fdata['Age'] <1),'sansein'] = 'sahi' fdata.loc[((fdata['Resp'] < 30) | (fdata['Resp'] > 60)) & (fdata['Age'] <1) ,'sansein'] = 'kharabh' fdata.loc[(fdata['Resp'].between(24,40)) & (fdata['Age'].between(1,3)),'sansein'] = 'sahi' fdata.loc[((fdata['Resp'] < 24) | (fdata['Resp'] > 40)) & (fdata['Age'].between(1,3)) ,'sansein'] = 'kharabh' fdata.loc[(fdata['Resp'].between(22,34)) & (fdata['Age'].between(3,6)),'sansein'] = 'sahi' fdata.loc[((fdata['Resp'] < 22) | (fdata['Resp'] > 34)) & (fdata['Age'].between(3,6)) ,'sansein'] = 'kharabh' fdata.loc[(fdata['Resp'].between(18,30)) & (fdata['Age'].between(6,12)),'sansein'] = 'sahi' fdata.loc[((fdata['Resp'] < 18) | (fdata['Resp'] > 30)) & (fdata['Age'].between(6,12)) ,'sansein'] = 'kharabh' fdata.loc[(fdata['Resp'].between(12,20)) & (fdata['Age'] >12),'sansein'] = 'sahi' fdata.loc[((fdata['Resp'] < 12) | (fdata['Resp'] > 20)) & (fdata['Age'] >12),'sansein'] = 'kharabh' fdata['sansein'].fillna('ptanhi', inplace=True) fdata.head() fdata.loc[fdata['Age'] >=65, 'umar'] = 'naman' fdata.loc[fdata['Age'] <1, 'umar'] = 'mukul' fdata.loc[(fdata['Age'] >=1) & (fdata['Age'] <65),'umar'] = 'jawani' fdata.loc[(fdata['SBP'] <90) & (fdata['DBP'] <60), 'bp'] = 'low' fdata.loc[(fdata['SBP'].between(90,120, inclusive=True)) & (fdata['DBP'].between(60,80, inclusive=True)),'bp'] = 'normal' fdata.loc[(fdata['SBP'].between(120,140, inclusive=True)) & (fdata['DBP'].between(80,90, inclusive=True)),'bp'] = 'above normal' fdata.loc[(fdata['SBP'] > 140 ) & (fdata['DBP'] > 90 ), 'bp'] = 'high' fdata['bp'].fillna('ptanhi', inplace=True) fdata.head() df=pd.DataFrame(fdata) df.head() #df.drop('Resp',axis=1,inplace=True) df.drop('HR',axis=1,inplace=True) df.drop('O2Sat',axis=1,inplace=True) df.drop('Temp',axis=1,inplace=True) df.drop('SBP',axis=1,inplace=True) df.drop('bekaar',axis=1,inplace=True) df.drop('DBP',axis=1,inplace=True) df.drop('data',axis=1,inplace=True) df.head() data.head(50) df.describe() df.head() df.corr() corr=df.corr().SepsisLabel print(corr) # + def plot_corr(df,size=10): corr = df.corr() fig, ax = plt.subplots(figsize=(size, size)) ax.matshow(corr) plt.xticks(range(len(corr.columns)), corr.columns); plt.yticks(range(len(corr.columns)), corr.columns); # - x=plot_corr(df,10) from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D # + y=df['Bilirubin_direct'].values X=df['Bilirubin_total'].values # + X, Y = np.meshgrid(X, Y) R = np.sqrt(X**2 + Y**2) Z = np.sin(R) fig = plt.figure() ax = Axes3D(fig) ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.viridis) plt.show() # - sns.heatmap(df) # + #pd.plotting.scatter_matrix(df, alpha=0.2, figsize=(10, 10)) #plt.show() # + active="" # corrmat = df.corr() # # cg = sns.clustermap(corrmat, cmap ="YlGnBu", linewidths = 0.1); # plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation = 0) # # cg # - import pingouin as pg corr=df.corr().SepsisLabel df_corr=pd.DataFrame(corr) df_corr.head() df_corr.sort_values(ascending=False,by="SepsisLabel") df=pd.get_dummies(df) df.head() df.drop('Resp',axis=1,inplace=True) df.columns df.drop('Age',axis=1,inplace=True) df.drop('dhadhkan_ptanhi',axis=1,inplace=True) df.drop('o2stat_ptanhi',axis=1,inplace=True) df.drop('sansein_ptanhi',axis=1,inplace=True) df.drop('bp_ptanhi',axis=1,inplace=True) df.drop('umar_jawani',axis=1,inplace=True) df.head() df.columns 
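# Aside (an added sketch, not part of the original notebook): the hand-written threshold rules above, e.g. the temperature rule, can also be expressed with `pd.cut`; `right=False` reproduces the `>= 36.4` / `< 37.6` boundaries, and missing values again fall back to 'ptanhi'. The frame `demo` and column `tapmaan_cut` below are illustrative names only.
# +
# Added illustrative sketch: the temperature rule rewritten with pd.cut on a toy frame.
import numpy as np
import pandas as pd

demo = pd.DataFrame({"Temp": [35.0, 36.4, 37.0, 38.2, np.nan]})
demo["tapmaan_cut"] = pd.cut(
    demo["Temp"],
    bins=[-np.inf, 36.4, 37.6, np.inf],
    labels=["kharabh", "sahi", "kharabh"],
    right=False,          # left-closed bins: [36.4, 37.6) is 'sahi'
    ordered=False,        # needed because the 'kharabh' label is repeated
)
# records with missing Temp get the extra category 'ptanhi', as in the .loc version
demo["tapmaan_cut"] = demo["tapmaan_cut"].cat.add_categories("ptanhi").fillna("ptanhi")
demo
# -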
X_int=df.drop('SepsisLabel',axis=1).values Y_int=df['SepsisLabel'].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_int,Y_int, test_size=0.2,random_state=42) # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer steps = [ ('imp', Imputer(missing_values="NaN",strategy="mean",axis=0)), ('clf', xgb.XGBClassifier(objective='binary:logistic', n_estimators=1000, seed=1))] pipeline = Pipeline(steps) pipeline.fit(X_train, y_train) pipeline.predict(X_test) pipeline.score(X_test, y_test) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # IST256 Project Deliverable 3 (P3) # # ## Phase 3: Implementation # # In this step, you submit the final version of working code. No changes to your code will be considered after this submission. It is important to take prior instructor feedback taken into consideration and these factor into your evaluation. # # **IMPORTANT**: Don't forget to journal your work on the project as it factors into the evaluation of your work! # ### Step 1: What is Your Idea, Again? # # Please reiterate your project idea below (you can copy it from P1/P2). # # `--== Double-click and put the title or brief description of your project below ==--` # # # ### Step 2: Project Code # # Include all project code below. Make sure to execute your code to ensure it runs properly before you turn it in. # # ### Prepare for your Pitch and Reflection # # With the project code complete, its time to prepare for the final deliverable - submitting your project demo Pitch and reflection. # # run this code to turn in your work! from coursetools.submission import Submission Submission().submit() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Timing OpenCL kernels # # Measuring the performance of OpenCL kernels is important for finding out how well they peform. The OpenCL framework provides a way to get profiling information from kernels. # We re-use the matrix multiplication code for this example. One optimisation that can be applied is to transpose the matrix before using it in the matrix computation as shown in the following figure: #
# Figure: Matrix transpose as a potential optimisation
#
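# The cell below is an added sketch (not part of the original code): a small NumPy check that computing each element of $C$ from a pre-transposed copy of $A$ gives the same product as the ordinary multiplication.
# +
# Added sketch: verify the transposed formulation with NumPy on small random matrices.
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((4, 3)).astype(np.float32)
B = rng.random((3, 5)).astype(np.float32)

AT = A.T  # transpose A up front, as the optimised kernel would
C_T = np.empty((A.shape[0], B.shape[1]), dtype=np.float32)
for m in range(A.shape[0]):
    for n in range(B.shape[1]):
        # element (m, n) is the dot product of column m of A^T with column n of B
        C_T[m, n] = np.dot(AT[:, m], B[:, n])

assert np.allclose(C_T, A @ B, atol=1e-5)
# -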
# # The theory behind this is that since memory elements in dimension 0 (the row dimension) are contiguous, memory accesses to and from matrix $A$ will be more efficient if $A$ was transposed ($A^{T}$) prior to computing the matrix multiplication. Then an element of matrix C at position $(m,n)$ is computed as the dot product of column $m$ from matrix $A^T$ and column $n$ from matrix $B$. # The code [mat_mult_transpose](code/mat_mult_transpose.cpp) has been modified to include two additional kernels that transpose matrix A and use it in the matrix multiplication. It uses the same data as the simple matrix multiplication code, however we regenerate the data here for convenience. # + jupyter={"outputs_hidden": true} # Code to make the test file import numpy as np import os nrows=1024 ncols=1024 # Make up some arrays of random numbers matrix_A=np.random.random((nrows, ncols)).astype(np.float32) matrix_B=np.random.random((nrows, ncols)).astype(np.float32) # Make up the answer, the resulting Matrix C matrix_C=np.matmul(matrix_A, matrix_B).astype(np.float32) # Write the files to grid, notice how I unravel in column major (f) format # before writing to file matrix_A.ravel(order="f").tofile(os.path.join("code","array_A_1D.dat")) matrix_B.ravel(order="f").tofile(os.path.join("code","array_B_1D.dat")) matrix_C.ravel(order="f").tofile(os.path.join("code","array_C_answer_1D.dat")) # - # After compilation you can just run the code and see how much faster the transposed version is compared to the standard matrix multiply. # !cd code; ./mat_mult_transpose # Try editing line 77 to switch from CPU devices to GPU devices if you have one available and recompile. # # from # # ```C # cl_device_type target_device=CL_DEVICE_TYPE_CPU; # ``` # # to # # ```C # cl_device_type target_device=CL_DEVICE_TYPE_GPU; # ``` # !cd code; ./mat_mult_transpose # For my machine and a GPU it turns out that the transposed matrix calculation is actually around 5x *slower* than the standard implementation. If I use the CPU then the transposed matrix approach is around 2.6x *faster*. I am not certain why the code on the GPU is slower, however being able to find this information out is very helpful. # ## Machinery to time kernels # In order to time kernels, a few key changes must be made to the standard matrix multiplication code. Firstly, the command queue has to be enabled to measure profiling events. This is achieved by setting the flag **CL_QUEUE_PROFILING_ENABLE** in the call to the [clCreateCommandQueue](https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clCreateCommandQueue.html) function when the command queues are created. # # ```C # command_queues_1d[queue_counter]=clCreateCommandQueue( contexts_1d[platform_counter], # devices_2d[platform_counter][j], # CL_QUEUE_PROFILING_ENABLE, # &errcode); # ``` # # OpenCL events can store timing information from commands that are enqueued. In order to time the kernels we need to associate an OpenCL **Event** with each kernel execution. Watch how I use the event **event_mat_mult** in the execution of the standard matrix multiply kernel. # ```C # cl_event event_mat_mult; # # // Now enqueue the standard matrix multiply kernel # errchk(clEnqueueNDRangeKernel( command_queue, # kernel_mat_mult, # work_dim, # NULL, # global_size_mat_mult, # NULL, # 1, # &event_mat_transpose, # &event_mat_mult), "Running the kernel"); # # ``` # This kernel enqueue waits for **event_mat_transpose** from the matrix transpose kernel and associates **event_mat_mult** with the running of that kernel. 
# At the end of running the kernels it is important to make sure they are finished so we can get proper timing information from the events. I add a **clFinish** to make sure the code finishes all work with the command queue. # ```C # clFinish(command_queue); # ``` # Now to extract the timing information from the completed events we use [clGetEventProfilingInfo](https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clGetEventProfilingInfo.html) to extract the integer start and stop times (in nanoseconds) of the enqueued kernels. The code below extracts the start and stop times and converts the elapsed time to milliseconds. # ```C # // Get the timing information from each event # cl_ulong start_counter=0, end_counter=0; # # // next the standard matrix multiply # clGetEventProfilingInfo( event_mat_mult, # CL_PROFILING_COMMAND_START, # sizeof(cl_ulong), # &start_counter, # NULL); # clGetEventProfilingInfo( event_mat_mult, # CL_PROFILING_COMMAND_END, # sizeof(cl_ulong), # &end_counter, # NULL); # # // This should give the time in milliseconds # cl_double time_mat_mult=(cl_double)(end_counter-start_counter)*(cl_double)1.0e-6; # # printf("Standard matrix multiply took %f ms\n", time_mat_mult); # ``` #
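# The same profiling pattern can also be written from Python with PyOpenCL. The snippet below is an added sketch, not part of the original code: it assumes the `pyopencl` package and a working OpenCL runtime, and the naive `mat_mult` kernel in it is illustrative rather than the repository's `mat_mult_transpose.cpp`. PyOpenCL exposes the same `CL_PROFILING_COMMAND_START`/`CL_PROFILING_COMMAND_END` counters through `event.profile`, so the reported times are directly comparable to the C version.
#
# ```python
# import numpy as np
# import pyopencl as cl
#
# kernel_src = """
# __kernel void mat_mult(__global const float* A,
#                        __global const float* B,
#                        __global float* C,
#                        const unsigned int N) {
#     // one work-item per element of C (row-major indexing in this sketch)
#     size_t i = get_global_id(0);
#     size_t j = get_global_id(1);
#     float acc = 0.0f;
#     for (unsigned int k = 0; k < N; k++)
#         acc += A[i*N + k] * B[k*N + j];
#     C[i*N + j] = acc;
# }
# """
#
# N = 1024
# A = np.random.random((N, N)).astype(np.float32)
# B = np.random.random((N, N)).astype(np.float32)
# C = np.empty_like(A)
#
# ctx = cl.create_some_context()
# # Enable profiling on the queue, the analogue of CL_QUEUE_PROFILING_ENABLE above
# queue = cl.CommandQueue(ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
#
# mf = cl.mem_flags
# A_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=A)
# B_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=B)
# C_buf = cl.Buffer(ctx, mf.WRITE_ONLY, C.nbytes)
#
# prg = cl.Program(ctx, kernel_src).build()
#
# # Enqueueing the kernel returns an event that carries the profiling counters
# event = prg.mat_mult(queue, (N, N), None, A_buf, B_buf, C_buf, np.uint32(N))
# event.wait()
#
# # profile.start and profile.end are in nanoseconds, as with clGetEventProfilingInfo
# time_ms = (event.profile.end - event.profile.start) * 1.0e-6
# print(f"Matrix multiply took {time_ms:.3f} ms")
#
# cl.enqueue_copy(queue, C, C_buf)
# ```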
# © 2018 by Dr.
# email:
# Visit us at: www.pelagos-consulting.com
#
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="dtvIIWbSLA2w" # The notebook is a helper for the blog : https://www.amankalra.com/machine-learning/using-nlp-to-detect-fake-news # # Download the data here: https://www.kaggle.com/clmentbisaillon/fake-and-real-news-dataset # # For any issues, contact at # # Happy Learning! # + [markdown] id="JWyhgatFKoYn" # # Importing Libraries # + id="APDYH-QRKGQk" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report import re import string import os # + colab={"base_uri": "https://localhost:8080/"} id="7DVBLzsKNfMK" outputId="4a4849bc-706f-4354-b52a-8cc955490e2f" from google.colab import drive drive.mount('/content/drive') # + id="CSUSIRzZOnQf" os.chdir('/content/drive/MyDrive/Portfolio/Project Notebooks/Fake News detection/') # + [markdown] id="Bt44q7SrK82v" # # Loading the Data # + id="CpALTjp2NZ2Q" df_fake = pd.read_csv('Fake.csv') df_true = pd.read_csv('True.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="_Vi0xDzUjM-j" outputId="9bca4d18-4fb1-4e2c-d523-f6408cefd8c0" df_fake.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="L2-XIaoujQ0p" outputId="834b8e7d-cefa-4f94-8797-c8173cac4892" df_true.head() # + id="2TuyqsnrkV5E" df_fake['class'] = 0 df_true['class'] = 1 # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Uv0yR8t_kguJ" outputId="0eaf9015-8570-4aea-e01d-38ee8d720b9b" df_fake.head() # + colab={"base_uri": "https://localhost:8080/"} id="3D6a2-h_kjZB" outputId="8622d390-0a6a-42f2-b298-fb258e7e799a" df_fake.shape, df_true.shape # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="VcRSGQH3kmtJ" outputId="4cdd47ac-95ba-4187-bf8e-7aecee1f5f7f" df_merge = pd.concat([df_fake, df_true], axis =0 ) df_merge.head(10) # + [markdown] id="PoKJXnOILfAM" # # Data Formatting # + colab={"base_uri": "https://localhost:8080/"} id="EO0JmXGclSP5" outputId="54f6b3ca-e572-4fb3-8945-a7bb21ff750e" df_merge.columns # + id="GOZi22J7lWXB" df = df_merge.drop(["title", "subject","date"], axis = 1) # + colab={"base_uri": "https://localhost:8080/"} id="2GS9UhzWld_Z" outputId="f7a8e6f6-7ec7-4178-cf3d-891fa9cee225" df.isnull().sum() # + id="fFbdGa4TlgC5" df = df.sample(frac= 1) # + colab={"base_uri": "https://localhost:8080/"} id="D9xDOxNkl5LA" outputId="4df46ab9-faab-473a-b14f-b72b030f375e" df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="q57gxTKyl65Z" outputId="eabbc2db-16bd-4e51-c2f4-d3c9b691ecaa" df.head() # + id="tw7zeXJRmGsS" df.reset_index(inplace = True) df.drop(["index"], axis = 1, inplace = True) # + colab={"base_uri": "https://localhost:8080/"} id="9WJTcp9xmSIZ" outputId="c9193fbf-fc67-4865-9946-65209eb7bc22" df.columns # + id="pkDadZoOmTmn" def wordopt(text): text = text.lower() text = re.sub('\[.*?\]', '', text) text = re.sub("\\W"," ",text) text = re.sub('https?://\S+|www\.\S+', '', text) text = re.sub('<.*?>+', '', text) text = re.sub('[%s]' % re.escape(string.punctuation), '', text) text = re.sub('\n', '', text) text = re.sub('\w*\d\w*', '', text) return text # + id="DOSl-esWmlpa" df["text"] = df["text"].apply(wordopt) # + 
id="GAb_ZHdOmvXw" x = df['text'] y = df['class'] # + id="Hc0guwg8m35K" x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25) # + colab={"base_uri": "https://localhost:8080/"} id="xephtqsHm76a" outputId="b2c65ba5-b7e3-496e-a2a9-0e4910c96f1e" x_train.head() # + [markdown] id="Za76U1tzLpgS" # # Tokenizing Data # # + id="p3iESEJnm-va" #comverting text to vector from sklearn.feature_extraction.text import TfidfVectorizer vectorization = TfidfVectorizer() xv_train = vectorization.fit_transform(x_train) xv_test = vectorization.transform(x_test) # + [markdown] id="rTawQ_CgLwaI" # # Model Building # + id="6czCUfiNnE4C" #modeling #logestic regression # + colab={"base_uri": "https://localhost:8080/"} id="M0Kew_K7nzbA" outputId="7c01eadb-f1db-493e-fb65-49268c0a43ce" from sklearn.linear_model import LogisticRegression LR = LogisticRegression() LR.fit(xv_train,y_train) # + id="QzSDjnLSoOyG" pred_lr=LR.predict(xv_test) # + colab={"base_uri": "https://localhost:8080/"} id="ILoFe6hFoA06" outputId="de12a380-647a-479c-f03a-79b54b1b0065" LR.score(xv_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="uh8AoaSHoJla" outputId="456616c7-e0eb-4905-de24-d5129f139b85" print(classification_report(y_test,pred_lr)) # + id="58Sa5EBpoZnC" #decision tree # + colab={"base_uri": "https://localhost:8080/"} id="S-qAInmFo3_9" outputId="040f9cc7-2c02-439e-bb69-1b507b5d55d8" from sklearn.tree import DecisionTreeClassifier DT = DecisionTreeClassifier() DT.fit(xv_train, y_train) # + id="stRYUd7Wo9Eb" pred_dt = DT.predict(xv_test) # + colab={"base_uri": "https://localhost:8080/"} id="RYoee1x1o-Vy" outputId="fc7da83e-e36f-4a46-d492-a41f7076d012" DT.score(xv_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="e9ukVdtzpAKi" outputId="af4e83fe-91bb-4b55-d318-476e7664e7c5" print(classification_report(y_test, pred_dt)) # + id="09r0LRkYpCOg" #gradient boosting classifier # + colab={"base_uri": "https://localhost:8080/"} id="CINKA87tpEpa" outputId="aad27fb1-6b43-4a26-aec5-ce4e107ef065" from sklearn.ensemble import GradientBoostingClassifier GBC = GradientBoostingClassifier(random_state=0) GBC.fit(xv_train, y_train) # + id="qDaex3BnpHui" pred_gbc = GBC.predict(xv_test) # + colab={"base_uri": "https://localhost:8080/"} id="OuupZANWpLTp" outputId="ef7906a2-2e38-4369-f55d-d588f2150d3f" GBC.score(xv_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="c2PmNIJTpOLK" outputId="b448f627-93dd-481f-c72f-ad6721c355a8" print(classification_report(y_test, pred_gbc)) # + id="PgTB5F_ppP95" #random forest # + colab={"base_uri": "https://localhost:8080/"} id="NHO9OFYMpQ_R" outputId="5fc05480-61af-4414-97db-9bf23192114f" from sklearn.ensemble import RandomForestClassifier RFC = RandomForestClassifier(random_state=0) RFC.fit(xv_train, y_train) # + id="wuBMOek3pSlh" pred_rfc = RFC.predict(xv_test) # + colab={"base_uri": "https://localhost:8080/"} id="fF0vv2FKpUEy" outputId="cb2ed0fa-2bb5-4851-ce2a-2d5169e84ef6" RFC.score(xv_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="6hn8f6UtpWNz" outputId="d89d8d06-0f60-4ccd-b233-5deaf46fe33f" print(classification_report(y_test, pred_rfc)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import scipy as sp from scipy.optimize import curve_fit import matplotlib.pyplot as plt import sympy as sy from sympy.solvers import solve import pandas as pd from 
numpy import exp, loadtxt, pi, sqrt import lmfit as lm from lmfit import Model plt.rc('text', usetex=True) plt.rc('font', family='serif') df1=pd.DataFrame.from_csv('A2_Schuster.csv',sep=';',encoding='utf-8') #df1 #Zeigt dir die Tabelle an df2=df1.sort_values('r in cm') r=df2['r in m']= df2['r in cm']*1e-2 B=df2['B in T']=df2['B in mT']*1e-3 U=df2['U in V'] df2 def func(x,a,b): return a*x+b # + u=0;v=9 x=r[u:v]**2*B[u:v]**2;y=2*U[u:v] gmodel = Model(func) result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print(perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x[u:v], y[u:v], yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(2.8*1e-9,350,s2,ha="left",va="center",bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke',)) plt.xlabel(r'$r^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$2U$ in $V$',fontsize=16) plt.title(r'Schuster mit r=2cm',fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_2cm.pdf", bbox_inches='tight', format='pdf') plt.show() # - u=0;v=9;g=2 while v<= len(r): x=r[u:v]**2*B[u:v]**2;y=2*U[u:v] gmodel = Model(func) result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3,alpha=0.5) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$r^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$2U$ in $V$',fontsize=16) plt.title(r'Schuster mit r='+str(g)+'cm',fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_"+g+"cm.pdf", bbox_inches='tight', format='pdf') plt.show() u+=9;v+=9;g+=1 # + x=r**2*B**2;y=2*U gmodel = Model(func) result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') #plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$r^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$2U$ in $V$',fontsize=16) plt.title(r'Schuster',fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_ges.pdf", bbox_inches='tight', format='pdf') plt.show() # - # ## Methode nach Busch 
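# A note before loading the Busch data: `pd.DataFrame.from_csv`, used in the next cell (and for the Schuster table earlier), was deprecated in pandas 0.21 and removed in pandas 1.0, so those cells may fail on a current installation. The sketch below shows a roughly equivalent call using `pd.read_csv`; the variable name `df3_modern` is only illustrative, and the file name, separator, and encoding are taken from the cell that follows.
# +
# Sketch only (illustrative, not the notebook's own loading step): read_csv with
# index_col=0 mirrors the old from_csv default index handling; parse_dates is
# omitted because the columns here are numeric measurements.
import pandas as pd

df3_modern = pd.read_csv('A2_Busch.csv', sep=';', encoding='utf-8', index_col=0)
df3_modern.head()
# -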
df3=pd.DataFrame.from_csv('A2_Busch.csv',sep=';',encoding='utf-8') df3 #Zeigt dir die Tabelle an df4=df3.sort_values(['n','U in V']) n=df4['n'] B=df4['B in T']=df4['B in mT']*1e-3 U=df4['U in V'] df4 # + u=0;v=7 x=(0.13**2)*B[u:v]**2; y=8*(np.pi**2)*(n[u:v]**2)*U[u:v] gmodel = Model(func,nan_policy='omit') result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$s^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$8 \pi^{2} n^{2} U$ in $V$',fontsize=16) plt.title(r'Busch mit n='+str(1),fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_"+g+"cm.pdf", bbox_inches='tight', format='pdf') plt.show() u=7;v=14 x=(0.13**2)*B[u:v]**2; y=8*(np.pi**2)*(n[u:v]**2)*U[u:v] gmodel = Model(func,nan_policy='omit') result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$s^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$8 \pi^{2} n^{2} U$ in $V$',fontsize=16) plt.title(r'Busch mit n='+str(2),fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_"+g+"cm.pdf", bbox_inches='tight', format='pdf') plt.show() u=14;v=20 x=(0.13**2)*B[u:v]**2; y=8*(np.pi**2)*(n[u:v]**2)*U[u:v] gmodel = Model(func,nan_policy='omit') result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$s^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$8 \pi^{2} n^{2} U$ in $V$',fontsize=16) plt.title(r'Busch mit n='+str(3),fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_"+g+"cm.pdf", 
bbox_inches='tight', format='pdf') plt.show() # + x=(0.13**2)*B**2; y=8*(np.pi**2)*(n**2)*U gmodel = Model(func,nan_policy='propagate') result = gmodel.fit(y, x=x, a=1.7e11, b=10) pcov= result.covar perr = np.sqrt(np.diag(pcov)) #print(result.fit_report()) print(result.best_values) popt=result.best_values print('Fehler [a,b]:',perr) plt.plot(x, y,'o',color='C0',label=r'Messdaten') plt.errorbar(x, y, yerr=perr[1], fmt='.',color='r',capsize=3, label='Fehler',alpha=0.4) plt.plot(x, result.best_fit,'-',color='C1',label=r'Fit: y = ax + b') ax=plt.axes() s2=r'''\begin{tabular}{ c | c | c } %s & Wert & Fehler \\\hline %s & %4.3g & %3.3g \\\hline %s &%3.3g & %3.3g \end{tabular}'''%('','a',popt['a'],perr[0],'b',popt['b'],perr[1]) plt.text(0.75, 0.15,s2,horizontalalignment='center',verticalalignment='center', transform = ax.transAxes, bbox=dict(boxstyle="square",ec='gold',fc='whitesmoke')) plt.xlabel(r'$s^{2}B^{2}$ in $m^2 T^2$',fontsize=16) plt.ylabel(r'$8 \pi^{2} n^{2} U$ in $V$',fontsize=16) plt.title(r'Busch mit n='+str(g),fontsize=18) plt.legend(loc='upper left') #plt.savefig("Schuster_"+g+"cm.pdf", bbox_inches='tight', format='pdf') plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + cellView="form" id="EbEbhy9hvF7i" outputId="19143128-fb9b-4aff-c365-5fb38b1d7382" colab={"base_uri": "https://localhost:8080/"} #@title A simplified reservoir simulation model #@markdown In this section we will use a simplified reservoir simulation model based in constant volume - temperature flashes. This will be used for developing fluid input to further gas processing calculations. #@markdown

This document is part of the module ["Introduction to Gas Processing using NeqSim in Colab"](https://colab.research.google.com/github/EvenSol/NeqSim-Colab/blob/master/notebooks/examples_of_NeqSim_in_Colab.ipynb#scrollTo=_eRtkQnHpL70). # #%%capture # !pip install neqsim import neqsim from neqsim.thermo.thermoTools import * from neqsim.process.processTools import simplereservoir import matplotlib import numpy as np import matplotlib.pyplot as plt import math plt.style.use('classic') # %matplotlib inline # + [markdown] id="sVE0XWti5qav" # # Reservoir simulation - the tank model # One of the representations of reservoir models is called the Tank Model, is also called "the building block of reservoir simulators", it is a zero dimensional model, because just like in the example of the oil MBE equation, the difference between the initial oil and the final oil entering the reservoir is the Cumulative net withdrawal. The tank model takes the average values for the whole properties of the reservoir by assuming that there is no oil entering the reservoir. However, the tank model cannot represent sandbars and average them out as a whole due to variation of lithology, but instead it can average out the components within. In this case, because of the expansion of fluids complications, the MBE equation together with Darcy's Law are used to describe the behavior of each component. This new model is a one- dimensional simulator because it has more than one component in one direction and one component in the other two directions. # # Ref.: https://wiki.seg.org/wiki/Reservoir_simulation # + id="HqFMKNfG5qaw" outputId="b2824c9b-a13e-4a17-c4c0-08a57c469786" colab={"base_uri": "https://localhost:8080/", "height": 684} fluid1 = fluid('srk') fluid1.addComponent("nitrogen", 0.100) fluid1.addComponent("CO2", 0.100) fluid1.addComponent("methane", 30.00) fluid1.addComponent("ethane", 1.0) fluid1.addComponent("propane", 1.0) fluid1.addComponent("i-butane", 1.0) fluid1.addComponent("n-butane", 1.0) fluid1.addComponent("n-hexane", 0.1) fluid1.addComponent("n-heptane", 0.1) fluid1.addComponent("n-nonane", 1.0) fluid1.addComponent("nC10", 1.0) fluid1.addComponent("nC12", 3.0) fluid1.addComponent("nC15", 3.0) fluid1.addComponent("nC20", 3.0) fluid1.addComponent("water", 11.0) fluid1.setMixingRule(2) fluid1.setMultiPhaseCheck(True) fluid1.setPressure(100.0, 'bara') fluid1.setTemperature(100.0, 'C') TPflash(fluid1) reservoirOps = simplereservoir(fluid1, "Well 1 reservoir", gasvolume=5.0 * 1e7, oilvolume=552.0 * 1e6, watervolume=10.0e6) producedGasStream = reservoirOps.addGasProducer("SLP_A32562G") producedGasStream.setFlowRate(0.01, "MSm3/day") injectionGasStream = reservoirOps.addGasInjector("SLP_A32562GI") fluidComposition(injectionGasStream.getFluid(), [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,0.0]) injectionGasStream.setFlowRate(1.1, "MSm3/day") injectionWaterStream = reservoirOps.addWaterInjector("SLP_A32562W") injectionWaterStream.setFlowRate(10000000.1, "kg/day") producedOilStream = reservoirOps.addOilProducer("SLP_A32562O") producedOilStream.setFlowRate(50000000.0, "kg/day") deltat = 24*60*60.0*365 time = [] pressure = [] gasvolume = [] watervolume = [] oilvolume = [] wateringas = [] totalproducedoe = [] co2reservoirgas = [] for t in range(10): reservoirOps.runTransient(deltat) time.append(t*deltat/(24*60*60.0*365)) pressure.append(reservoirOps.getReservoirFluid().getPressure('bara')) gasvolume.append(reservoirOps.getReservoirFluid().getPhase("gas").getVolume("m3")) 
oilvolume.append(reservoirOps.getReservoirFluid().getPhase("oil").getVolume("m3")) watervolume.append(reservoirOps.getReservoirFluid().getPhase("aqueous").getVolume("m3")) wateringas.append(reservoirOps.getReservoirFluid().getPhase("gas").getComponent('water').getx()) totalproducedoe.append(reservoirOps.getProductionTotal('MSm3 oe')) co2reservoirgas.append(reservoirOps.getReservoirFluid().getComponent('CO2').getz()) plt.rcParams['figure.figsize'] = [10, 20] fig, ax = plt.subplots() plt.subplot(7, 1, 1) plt.plot(time, pressure) plt.xlabel('Year [-]') plt.ylabel('Pressure [bara]') plt.subplot(7, 1, 2) plt.plot(time, gasvolume) plt.xlabel('Year [-]') plt.ylabel('gasVol [m3]') plt.subplot(7, 1, 3) plt.plot(time, oilvolume) plt.xlabel('Year [-]') plt.ylabel('oilVol [m3]') plt.subplot(7, 1, 4) plt.plot(time, watervolume) plt.xlabel('Year [-]') plt.ylabel('waterVol [m3]') plt.subplot(7, 1, 5) plt.plot(time, wateringas) plt.xlabel('Year [-]') plt.ylabel('water in gas [-]') plt.subplot(7, 1, 6) plt.plot(time, totalproducedoe) plt.xlabel('Year [-]') plt.ylabel('Total production [MSm3 oe]') plt.subplot(7, 1, 7) plt.plot(time, co2reservoirgas) plt.xlabel('Year [-]') plt.ylabel('CO2 in reservoir gas [mole frac]') # + id="r4wsTuOD5qaw" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk from nltk.collocations import * import pandas as pd with open('data.txt', 'rb') as f: data = f.read() data = str(data).split(" ") bigram_measures = nltk.collocations.BigramAssocMeasures() finder = BigramCollocationFinder.from_words(data) finder.apply_freq_filter(15) finder.nbest(bigram_measures.raw_freq, 15) # + bigram_measures = nltk.collocations.BigramAssocMeasures() finder = BigramCollocationFinder.from_words(data) finder.apply_freq_filter(15) df=finder.nbest(bigram_measures.raw_freq, 15) num = len(df) tempword = '' wtempword = '' whole = [] for i in range (0, num): tempnum = len(df[i]) for x in range (0, tempnum): tempword = df[i][x] wtempword = wtempword + " " + tempword whole.append(wtempword.lstrip()) tempword = "" wtempword = "" Resturants = pd.read_excel(r"From2016_Sentimental_Burpple.xlsx") Reviews = Resturants['Cleaned_Reviews'].tolist() polarity = Resturants['polarity'].tolist() num = len(whole) num1 = len(Reviews) count = 0 countlist = [] targetword = '' targetreview = '' average = float(0) total = float(0) averagelist = [] for i in range (0, num): targetword = whole[i] for x in range( 0, num1): targetreview = str(Reviews[x]) if targetreview.find(targetword) >= 0: count = count + 1 total = total + polarity[x] targetreview = "" average = total/count averagelist.append(average) countlist.append(count) count = 0 average = 0 total = 0 targetword = "" Freq = pd.DataFrame() se = pd.Series(whole) Freq['Word'] = se se = pd.Series(countlist) Freq['Count'] = se se = pd.Series(averagelist) Freq['Average_Polarity'] = se Freq # - trigram_measures = nltk.collocations.TrigramAssocMeasures() finder = TrigramCollocationFinder.from_words(data) finder.apply_freq_filter(15) finder.nbest(trigram_measures.raw_freq, 15) # + trigram_measures = nltk.collocations.TrigramAssocMeasures() finder = TrigramCollocationFinder.from_words(data) finder.apply_freq_filter(15) df = finder.nbest(trigram_measures.raw_freq, 15) num = len(df) tempword = '' wtempword = '' whole = [] for i in range (0, num): tempnum = len(df[i]) for x in range (0, tempnum): tempword = 
df[i][x] wtempword = wtempword + " " + tempword whole.append(wtempword.lstrip()) tempword = "" wtempword = "" Resturants = pd.read_excel(r"From2016_Sentimental_Burpple.xlsx") Reviews = Resturants['Cleaned_Reviews'].tolist() polarity = Resturants['polarity'].tolist() num = len(whole) num1 = len(Reviews) count = 0 countlist = [] targetword = '' targetreview = '' average = float(0) total = float(0) averagelist = [] for i in range (0, num): targetword = whole[i] for x in range( 0, num1): targetreview = str(Reviews[x]) if targetreview.find(targetword) >= 0: count = count + 1 total = total + polarity[x] targetreview = "" average = total/count averagelist.append(average) countlist.append(count) count = 0 average = 0 total = 0 targetword = "" Freq1 = pd.DataFrame() se = pd.Series(whole) Freq1['Word'] = se se = pd.Series(countlist) Freq1['Count'] = se se = pd.Series(averagelist) Freq1['Average_Polarity'] = se Freq1 # - finder = BigramCollocationFinder.from_words(data) scored = finder.score_ngrams(bigram_measures.raw_freq) set(bigram for bigram, score in scored) sorted(finder.nbest(bigram_measures.raw_freq,20)) # + finder = BigramCollocationFinder.from_words(data) scored = finder.score_ngrams(bigram_measures.raw_freq) set(bigram for bigram, score in scored) df = sorted(finder.nbest(bigram_measures.raw_freq,20)) num = len(df) tempword = '' wtempword = '' whole = [] for i in range (0, num): tempnum = len(df[i]) for x in range (0, tempnum): tempword = df[i][x] wtempword = wtempword + " " + tempword whole.append(wtempword.lstrip()) tempword = "" wtempword = "" Resturants = pd.read_excel(r"From2016_Sentimental_Burpple.xlsx") Reviews = Resturants['Cleaned_Reviews'].tolist() polarity = Resturants['polarity'].tolist() num = len(whole) num1 = len(Reviews) count = 0 countlist = [] targetword = '' targetreview = '' average = float(0) total = float(0) averagelist = [] for i in range (0, num): targetword = whole[i] for x in range( 0, num1): targetreview = str(Reviews[x]) if targetreview.find(targetword) >= 0: count = count + 1 total = total + polarity[x] targetreview = "" average = total/count averagelist.append(average) countlist.append(count) count = 0 average = 0 total = 0 targetword = "" Freq2 = pd.DataFrame() se = pd.Series(whole) Freq2['Word'] = se se = pd.Series(countlist) Freq2['Count'] = se se = pd.Series(averagelist) Freq2['Average_Polarity'] = se Freq2 # - finder = TrigramCollocationFinder.from_words(data) scored = finder.score_ngrams(trigram_measures.raw_freq) set(trigram for trigram, score in scored) sorted(finder.nbest(trigram_measures.raw_freq,20)) # + finder = TrigramCollocationFinder.from_words(data) scored = finder.score_ngrams(trigram_measures.raw_freq) set(trigram for trigram, score in scored) df = sorted(finder.nbest(trigram_measures.raw_freq,20)) num = len(df) tempword = '' wtempword = '' whole = [] for i in range (0, num): tempnum = len(df[i]) for x in range (0, tempnum): tempword = df[i][x] wtempword = wtempword + " " + tempword whole.append(wtempword.lstrip()) tempword = "" wtempword = "" Resturants = pd.read_excel(r"From2016_Sentimental_Burpple.xlsx") Reviews = Resturants['Cleaned_Reviews'].tolist() polarity = Resturants['polarity'].tolist() num = len(whole) num1 = len(Reviews) count = 0 countlist = [] targetword = '' targetreview = '' average = float(0) total = float(0) averagelist = [] for i in range (0, num): targetword = whole[i] for x in range( 0, num1): targetreview = str(Reviews[x]) if targetreview.find(targetword) >= 0: count = count + 1 total = total + polarity[x] targetreview 
= "" if count == 0 or total == 0: average = 0 else: average = total/count averagelist.append(average) countlist.append(count) count = 0 average = 0 total = 0 targetword = "" Freq3 = pd.DataFrame() se = pd.Series(whole) Freq3['Word'] = se se = pd.Series(countlist) Freq3['Count'] = se se = pd.Series(averagelist) Freq3['Average_Polarity'] = se Freq3 # + everything = Freq.append(Freq1) everything = everything.append(Freq2) everything = everything.append(Freq3) everything = everything.sort_values(['Average_Polarity'], ascending=[0]) everything # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %run ../../common/import_all.py import time from common.setup_notebook import set_css_style, setup_matplotlib, config_ipython config_ipython() setup_matplotlib() set_css_style() # - # # Sorting algorithms # # We will illustrate how different sorting algorithms work, using array # # $$ # \boxed{6}\boxed{5}\boxed{3}\boxed{1}\boxed{8}\boxed{7}\boxed{2}\boxed{4} # $$ # # as the example. This array is used in the Wikipedia pages of these algorithms. We will assume that the task is to sort the array in the ascending order (from the smallest to the largest element), but all the discussion is easily mirrored for the opposite ordering. # # Note that this overview is far from being comprehensive of all possible existing algorithms for sorting! array = [6, 5, 3, 1, 8, 7, 2, 4] # ## Mergesort # # It is a *divide and conquer* algorithm and applies repeated comparison: the array (of length $n$) is first divided into its single components and then bigger and bigger chunks are merged together to recompose the original array, but sorted. # # ### The algorithm # # 1. In the initial stage, the array starts by being divided iteratively into halves until it gets to the level of the cells of each single component. If the array is not even in size, the initial two halves will have different lengths. In our example case we have # # $$ # \boxed{6}\ \ \ \boxed{5}\ \ \ \boxed{3}\ \ \ \boxed{1}\ \ \ \ \boxed{8}\ \ \ \boxed{7}\ \ \ \boxed{2}\ \ \ \boxed{4} # $$ # # 2. Now we need to create couples (subsets of two elements) by merging the single elements two by two. The way we do this is comparing couples of adjacent elements and sort them # # $$ # \boxed{5}\boxed{6}\ \ \ \boxed{1}\boxed{3}\ \ \ \ \boxed{7}\boxed{8}\ \ \ \boxed{2}\boxed{4} # $$ # # 3. Now time for merging into subsets of four elements. We compare adjacent couples for the job: compare the first element of the first with the first element of the second and choose the smallest and put it aside (there is no point in comparing the second elements of the first couple with the first of the second as couples are ordered); then the remaining element of the first couple with the remaining element of the other couple (remaining here stands for the one staying there after the isolation, whichever it is) and so on # # *specifically, in our example we need to merge couples $\boxed{5}\boxed{6}$ and $\boxed{1}\boxed{3}$, so we proceed by comparing 5 to 1, and choose 1; we are left with $\boxed{5}\boxed{6}$ and $\boxed{3}$, so we compare 5 to 3 and choose 3; we are then left with $\boxed{1}\boxed{3}\boxed{5}\boxed{6}$* # # $$ # \boxed{1}\boxed{3}\boxed{5}\boxed{6}\ \ \ \ \boxed{2}\boxed{4}\boxed{7}\boxed{8} # $$ # # 4. 
Now merge into subsets of eight by using the same procedure: start by comparing the first elements of the two first 4-subsets, choose the smallest; then keep comparing whatever is left on the first subset to what is left on the second subset. Eventually, this leads to # # $$\boxed{1}\boxed{2}\boxed{3}\boxed{4}\boxed{5}\boxed{6}\boxed{7}\boxed{8}$$ # # A great visual representation of the procedure can be seen in [[1]](#merge-viz). # # ### Complexity # # It is a $O(n \log n)$ algorithm, in the worst, best and average cases, because the splits are $O(\log n)$ and there are $n$ comparisons at each split. # ### An implementation # # Note that this implementation, which is recursive, is inspired by [[1]](#mergesort-so). # + def mergesort(my_array): if len(my_array) == 1: return my_array new = [] n = len(my_array) midpoint = int(n/2) right = mergesort(my_array[:midpoint]) left = mergesort(my_array[midpoint:]) while len(right) > 0 and len(left) > 0: if right[0] > left[0]: new.append(left[0]) left.pop(0) else: new.append(right[0]) right.pop(0) new += right + left return new mergesort(array) # - # ## Bubble sort # # It is called this way because smaller items iteratively walk up ("bubble") the top of the array. The algorithm works in such a way that the array is walked repeatedly and adjacent items are swapped if they are in the reverse order. # # ### The algorithm # # * First round of walking the array: # * Let's start from the example array above and start comparing the first two elements, $5 < 6$, so we swap them, creating $\boxed{5}\boxed{6}\boxed{3}\boxed{1}\boxed{8}\boxed{7}\boxed{2}\boxed{4}$ # * After this, we proceed on the next couple in line, $3 < 6$ and we produce another swap # * We continue like this, always comparing the next couple in line, eventually ending up with array $\boxed{5}\boxed{3}\boxed{1}\boxed{6}\boxed{7}\boxed{2}\boxed{4}\boxed{8}$, which concludes the first round # * Secound round of walking the array: # * We proceed in exactly the same way, starting to compare couples from the start again # * ... # * We will eventually end up with a totally sorted array # # A great visual representation of the procedure can be seen in [[2]](#bubble-viz). # # ### Complexity # # The complexity of bubble sort is $O(n^2)$ in the worst and average case, $O(n)$ in the best case (no swaps, meaning array is already sorted). This makes for a bad algorithm in general in terms of performance, due to all the repeated checks it does. It is a quite inefficient algorithm! # ### An implementation # # Which is also recursive. # + def bubblesort(my_array): my_array_copy = my_array.copy() n = len(my_array_copy) if (all([my_array_copy[i] < my_array_copy[i+1] for i in range(n-1)])): return my_array_copy for i in range(n-1): if my_array_copy[i] > my_array_copy[i + 1]: my_array_copy[i], my_array_copy[i+1] = my_array_copy[i+1], my_array_copy[i] my_array_copy = bubblesort(my_array_copy) return my_array_copy bubblesort(array) # - # ## Insertion sort # # Insertion sort works in such a way that each element gets pushed to its right location by doing all comparisons with the elements in front of it. # # ### The algorithm # # 1. In the case of our array, start with 6 which is the first number # 2. Now 5 is smaller than 6, so move it, obtaining $\boxed{5}\boxed{6}\boxed{3}\boxed{1}\boxed{8}\boxed{7}\boxed{2}\boxed{4}$ # 3. Proceed on to the next number, which is 3, and move it to the top, obtaining $\boxed{3}\boxed{5}\boxed{6}\boxed{1}\boxed{8}\boxed{7}\boxed{2}\boxed{4}$ # 4. 
Now on to 1, which gets moved to the top in the same way # 5. Now 8, which stays there # 6. Now 7, which gets placed right before the 8, ending up with $\boxed{1}\boxed{3}\boxed{5}\boxed{6}\boxed{7}\boxed{8}\boxed{2}\boxed{4}$ # 7. ... # 8. Eventually you'll have a sorted array # # ### Complexity # # In the worst and average case, this is a $O(n^2)$ algorithm, in the best case it is a $O(n)$ one (it does $n$ comparisons, this is the case when the array is already sorted. # ### Implementation # + def insertionsort(my_array): my_array_copy = my_array.copy() n = len(my_array_copy) for i in range(1, n): for j in range(i): if my_array_copy[i] < my_array_copy[j]: my_array_copy[i], my_array_copy[j] = my_array_copy[j], my_array_copy[i] return my_array_copy insertionsort(array) # - # ## Selection sort # # This algorithm is somehow similar to the insertion sort. It works by building two lists out of the original array: the sorted one built iteratively and the rest, which shrinks till becoming empty. Have a look at the viz in [[3]](#sel-viz) for an example. # # ### The algorithm # # 1. Initially, the sorted list is empty # 2. Find the smallest elements in the unsorted list and swap it with the leftmost one, so that in our example we get $\boxed{1}\boxed{5}\boxed{3}\boxed{6}\boxed{8}\boxed{7}\boxed{2}\boxed{4}$: this way the sorted list contains 1 and the rest has one element less # 3. Proceed on the next element and keep doing the same till everything is sorted # # ### Complexity # # The complexity is $O(n^2)$ in the best, worst and average case. This is because you have $n-1$ comparisons to find the lowest element, $n-2$ to find the second lowest, ... and so on, effectively meaning you will do $(n-1) + (n-2) + \ldots + 1 = \frac{n(n-1)}{2}$ comparisons. # ## Heapsort # # Heapsort [[2]](#heapsort-paper) works on the same philosophy of selection sort but uses a [heap structure](../data-structures.ipynb#Heap) to find the minimum rather than employing a linear time lookup. # # ### The algorithm # # 1. Build the (min) heap # 2. The sorted array is build by repeatedly extracting the root of the heap (the minimum element) and add to array # 3. Keep going, updating the heap each time # # ### Complexity # # The complexity of heapsort is $O(n \log n)$ in all cases. This is because the heap is built in $O(n)$ time and finding the needed element each time is a $O(\log n)$ operation given the length of heap is $\log n$, with $n$ calls this gives $O(n \log n)$, so a total of $O(n + n \log n) \approx O(n \log n)$. # ### Quicksort # # It is a more efficient that mergesort and heapsort, was published in 1962 [[3]](#quicksort-paper). # # ### The algorithm # # Note that many great examples with step to step approaches are present on the internet, look for instance [[4]](#quick-viz) and [[5]](#quick-video). # # The algorithm is a divide and conquer one which works by splitting the array into two chunks and sorting them separately and iterating the procedure recursively. # # 1. Pick an item from the array, call it *pivot*: in our case let's choose the first one (6) # 2. 
Sort the elements in such a way that all those smaller than the pivot go before it, all those greater than the pivot go after it (*partition* phase), the goal here is to locate the split point, which is where the pivot has to be placed: # * Start by placing right and left markers as respectively the last element (4) and the one next to the pivot (5), # * you need move the right marker to the first element which is smaller than the pivot and the left marker to the first element which is larger than the pivot by moving them one cell at a time in their respective directions (right towards left and left towards right): in our case find 8 (right marker itself) and 4 (first element smaller than pivot moving the left marker) # * switch these elements, obtaining array $\boxed{6}\boxed{5}\boxed{3}\boxed{1}\boxed{4}\boxed{7}\boxed{2}\boxed{8}$, and now right marker is in 8 and left marker in 4 (where they were before the switch) # * proceed with the same checks now, finding 7 for the right marker and 2 for the left marker # * exchange them, getting $\boxed{6}\boxed{5}\boxed{3}\boxed{1}\boxed{4}\boxed{2}\boxed{7}\boxed{8}$, with right marker in 7 and left marker in 2 # * now if you move the markers, they cross, a sign that you found the right position of the pivot in the order: have to switch the pivot itself with the new value of right (the one after the markers crossing), obtaining $\boxed{2}\boxed{5}\boxed{3}\boxed{1}\boxed{4}\boxed{6}\boxed{7}\boxed{8}$ # * at this point the pivot (6) is in its right place in the ordering as all elements before it are smaller and all elements after it are larger and you got two subarrays to order # * proceed on each of the subarrays in the same way with their pivot and markers, so the whole procedure is recursive # # # ### Complexity # # The complexity is $O(n \log n)$ in the best and average case, $O(n^2)$ in the worst case. # # The worst case verifies when the pivot divides the array into two groups of 0 and $n-1$ items, which then recursively become groups of 0 and $n-2$ items ... leading to $O(n^2)$ comparisons. This happens for sorted arrays. The best case verifies when each split divides the array into two groups of comparable length, which implies each recursive call processes an array of half the size: $\log n$ calls are made as you are splitting $n$ recursively until you reach length 1, so for instance with $n=32$ you go to 16, 8, 4, 2, 1 in 5 calls, which is $\log 32$. At ech level there are then $O(n)$ comparisons, so we get an overall $O(n \log n)$. # # The average case can be proven by solving the recursive relation, you can read it on [Wikipedia](https://en.wikipedia.org/wiki/Quicksort#Average-case_analysis). # ### Implementation # # This code here is admittedly awful (quite imperative). Also note that unlike the previous ones, this method is modifying the original array so to not lose the original order it is better to pass a copy. 
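# Before that full in-place version (the function in the next cell), here is a compact sketch of the same partition idea written without in-place swaps, shown purely as an illustration: the first element is taken as the pivot, exactly as in the walk-through above, and the helper name `quicksort_simple` is not part of the original code. It trades memory for readability, since new lists are allocated at every recursive call.
# +
# Illustrative sketch: out-of-place quicksort using the first element as pivot.
def quicksort_simple(items):
    if len(items) <= 1:          # arrays of length 0 or 1 are already sorted
        return items
    pivot, rest = items[0], items[1:]
    smaller = [x for x in rest if x < pivot]    # everything that goes before the pivot
    larger = [x for x in rest if x >= pivot]    # everything that goes after the pivot
    return quicksort_simple(smaller) + [pivot] + quicksort_simple(larger)

quicksort_simple([6, 5, 3, 1, 8, 7, 2, 4])      # the example array used throughout
# -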
# + def quicksort(my_array, pivot_index, end_index): my_array_slice = my_array[pivot_index: end_index] if len(my_array_slice) <= 1: return pivot = my_array[pivot_index] l, r = pivot_index + 1, end_index - 1 while l <= r - 1: i = r while i > pivot_index: if my_array[i] < pivot: break i -= 1 r = i i = l while i < end_index: if my_array[i] >= pivot: break i += 1 l = i if l <= r: my_array[l], my_array[r] = my_array[r], my_array[l] if l < r: my_array[l], my_array[pivot_index] = my_array[pivot_index], my_array[l] elif l == r: if my_array[l] < my_array[pivot_index]: my_array[l], my_array[pivot_index] = my_array[pivot_index], my_array[l] else: my_array[r], my_array[pivot_index] = my_array[pivot_index], my_array[r] new_pivot_index = my_array.index(pivot) my_array_slice = my_array[pivot_index: end_index] right = my_array_slice[pivot_index: new_pivot_index] left = my_array_slice[my_array_slice.index(pivot) + 1:] quicksort(my_array, 0, len(right)) quicksort(my_array, new_pivot_index + 1, new_pivot_index + 1 + len(left)) return array_copy = array.copy() array_copy = [7, 1, 3, 8, 2, 0] ('original array', array_copy) quicksort(array_copy, 0, len(array_copy)) array_copy # - # ## What does Python use # # The `sort` and `sorted` built-in methods in Python use a very efficient algorithm called Timsort [[5]](#timsort), designed by specifically for use in Python itself in 2002. # ### Measurement time (!) # # Let's measure the time taken by the implementations we wrote above, and by timsort, on arrays of different length. Note that we wanted to use bubblesort as well but our implementation easily exceeds recursion limits! So we will use mergesort, insertion sort and timsort. # # From the plot below you can see both the quadratic trend of insertion sort, and the fact that timsort is the most efficient one. Note that our implementation of mergesort includes some calls to `.pop`, which may add some overhead. # ## Now let's run these # # Going to run all these codes on arrays of various lengths, scaling as powers of ten, and measuring the time taken. Note that the implementations above may add some unneeded overhead give the way they've been written so the results here shouldn't be taken at face value. # + mergesort_t = [] bubblesort_t = [] insertionsort_t = [] quicksort_t = [] timsort_t = [] #n_range = [10, 100, 1000, 10000] n_range = np.arange(1, 1000, 100) for n in n_range: an_array = random.sample(range(n), n) t = time.process_time() sorted_array = mergesort(an_array) mergesort_t.append(time.process_time() - t) t = time.process_time() sorted_array = bubblesort(an_array) bubblesort_t.append(time.process_time() - t) t = time.process_time() sorted_array = insertionsort(an_array) insertionsort_t.append(time.process_time() - t) t = time.process_time() sorted_array = quicksort(an_array.copy(), 0, len(an_array)) quicksort_t.append(time.process_time() - t) t = time.process_time() sorted_array = sorted(an_array) timsort_t.append(time.process_time() - t) # + plt.figure(figsize=(15,6)) plt.semilogy(n_range, mergesort_t, marker='o', label='mergesort') plt.plot(n_range, bubblesort_t, marker='o', label='bubblesort') plt.plot(n_range, insertionsort_t, marker='o', label='insertionsort') plt.plot(n_range, quicksort_t, marker='o', label='quicksort') plt.plot(n_range, timsort_t, marker='o', label='timsort') plt.legend() plt.title('Time taken by sorting algorithm versus length of array') plt.xlabel('Array length') plt.ylabel('Seconds') plt.show(); # - # ## References # # 1. 
[QA on StackOverflow about mergesort](https://stackoverflow.com/questions/18761766/mergesort-python) # 2. , *Algorithm 232: heapsort*, **Commun. ACM** 7, 1964 # 3. , [*Quicksort*](https://watermark.silverchair.com/api/watermark?token=AQECAHi208BE49Ooan9kkhW_Ercy7Dm3ZL_9Cf3qfKAc485ysgAAAbEwggGtBgkqhkiG9w0BBwagggGeMIIBmgIBADCCAZMGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMaAi7EWpRqVU9rdJSAgEQgIIBZKbS7se6zBl0kxhap8bYf-aZy292j8fOk0IHqn9XWA8-JxAduErMfXV3ofUyjcneadd1nkVJLSEYdHEHNbaRo2UVOhgnSNNF6YsLndWoXPFYETB7hnhxS2cl4fjpo-uRFITfqIPgChf-F-wmRirrSzaghH4K857rIK7DVEcqqUMaYvm9Y6YYegpseKoSFDn2LowZaoDOoH2pWDnfL5zduUkPX-TsFgAGj2h85rECOngypQvQEckOltXvxoouhzXVsCXH99oHHCOSML5s5WLheQFFDFIt7Ll5SFSsBp7Q643B0LrVoR_n1j1BiBeDknyETDdFqRivYgD3JAIJMeDHeqLAv-37-iE3Q0S5KaqVnnFOuuMvEM5hxIonOULzSYqbJlKZVF61aJ4BiLnd_9vSL2rBtHMIUiuARsHVTX0DI3Ya_Zg1CqmP_1q52txMq7XUfrknCehLX-hBBJ-rDPiCZ80xkDwX), **The Computer Journal** 5.1, 1962 # 4. [Wikipedia on Timsort](https://en.wikipedia.org/wiki/Timsort) # # ### Visual examples # # 1. [A **visual** example of mergesort (from Wikipedia)](https://en.wikipedia.org/wiki/Merge_sort#/media/File:Merge-sort-example-300px.gif) # 2. [A **visual** example of bubblesort (from Wikipedia)](https://en.wikipedia.org/wiki/Bubble_sort#/media/File:Bubble-sort-example-300px.gif) # 3. [A **visual** example of selection sort](https://en.wikipedia.org/wiki/Selection_sort#/media/File:Selection-Sort-Animation.gif) # 4. [A series of **slides** about a quicksort example](https://www.cise.ufl.edu/~ddd/cis3020/summer-97/lectures/lec17/sld003.htm) # 5. [A **video** about quicksort](https://www.youtube.com/watch?v=3OLTJlwyIqQ) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from sklearn.neighbors import NearestNeighbors X = np.array([[2.1, 1.3], [1.3, 3.2], [2.9, 2.5], [2.7, 5.4], [3.8, 0.9], [7.3, 2.1], [4.2, 6.5], [3.8, 3.7], [2.5, 4.1], [3.4, 1.9], [5.7, 3.5], [6.1, 4.3], [5.1, 2.2], [6.2, 1.1]]) plt.figure() plt.title('Input data') plt.scatter(X[:,0], X[:,1], marker='o', s=75, color='black') plt.show() # + k = 5 test_datapoint = [4.3, 2.7] knn_model = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X) distances, indices = knn_model.kneighbors([test_datapoint]) print("K Nearest Neighbors:") for rank, index in enumerate(indices[0][:k], start=1): print(str(rank) + " ==>", X[index]) # - plt.figure() plt.title('Nearest neighbors') plt.scatter(X[:, 0], X[:, 1], marker='o', s=75, color='k') plt.scatter(X[indices][0][:][:, 0], X[indices][0][:][:, 1], marker='o', s=250, color='k', facecolors='none') plt.scatter(test_datapoint[0], test_datapoint[1], marker='x', s=75, color='k') plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf print(tf.__version__) data = input_data.read_data_sets('../data/MNIST', one_hot=True) batch_size = 100 n_batch = data.train.num_samples // batch_size x = tf.placeholder(tf.float32, [None, 784]) y = tf.placeholder(tf.float32, [None, 10]) w_1 = tf.Variable(tf.random_normal([784, 10])) b_1 = tf.Variable(tf.zeros([10])) o_1 = tf.nn.tanh(tf.matmul(x, w_1) + b_1) w_2 = tf.Variable(tf.random_normal([10, 1])) b_2 
= tf.Variable(tf.zeros([1])) pred = tf.nn.softmax(tf.matmul(o_1,w_2)+b_2) cor = tf.equal(tf.arg_max(y,1),tf.arg_max(pred,1)) acc = tf.reduce_mean(tf.cast(cor, float32)) loss = tf.reduce_mean(tf.square(y-pred)) train = tf.train.AdagradOptimizer(0.1).minimize(loss) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for i in range(21): for b in range(n_batch): b_x, b_y = data.train.next_batch(batch_size) sess.run(train,feed_dict={x:b_x, y:b_y}) accp = sess.run(acc,feed_dict={x:test.data.images,y:data.test.labels}) print(i, accp) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: bda-labs # language: python # name: bda-labs # --- # # Introduction to DataFrame # ## Initialize Spark # We start with a `SparkSession` # + from pyspark.sql import SparkSession spark = ( SparkSession .builder .appName("intro-to-df") .master("local") .getOrCreate() ) # - # ## Loading a CSV file into a DataFrame # See what do we have in the `data` folder # ! ls data # Load the `linkage.csv` file using the `csv` method on the Reader API help(spark.read) help(spark.read.csv) linkage = spark.read.csv("data/linkage.csv") # Check what do we have linkage # We can look at the head of the DataFrame calling the `show` method help(linkage.show) linkage.show() # **Can anyone spot what's wrong with the above data?** # # ... # # --> **What are these "question marks"??** # # let's check the schema of our dataframe linkage.printSchema() # **Why everything is a string?** # ## Managing Schema and Null Values # We are going to load the dataframe again, but this time tell the Reader API a couple of things: # # - first row is the header # - treat "?" as null values # - infer the schema from values linkage_df = ( spark.read .option("header", "true") .option("nullValue", "?") .option("inferSchema", "true") .csv("data/linkage.csv") ) df.printSchema() # **Now the schema looks better!** # # What about the values? df.show() # ## Transformations and Actions # Creating a DataFrame does not cause any distributed computation in the cluster. **A DataFrame is a data set representing an intermediate step in a computation**. # # For operating data (in a distributed manner), we have two type of operations: **transformations** and **actions**: # # - Transformations: lazy evaluation. They're not computed immediately, but they are recorded as a lineage for query plan optimization. # - Actions: distributed computation occurs after invoking an action # # Let's see how many records do we have in our DataFrame df.count() # We can use the `collect` action to return an `Array` with all the `Row` objects in our DataFrame. # # **Such `Array` will reside in local memory!!** df.collect() # ## Write to Disk # # What if we want to write the dataframe to disk, say in a different format? df.write.format("parquet").save("data/linkage-parquet") # ! ls data/linkage-parquet # ## Analyzing Data # # All good for now, but we don't load data for the sake of it, we do it because we want to run some analysis. # # Let's show the first 5 records of our dataframe df.show(5) # **What can we spot from here?** # # - First two fields are integer IDs. There represent the patients that were matched in the record # - The next nine values are numeric values (int and double) representing match scores on different fields, such as their names, sex, birthday, and locations. 
Fields are stored as integeres where the possible values are match (1) or no-match (0), and doubles whenever partial matches are possible # - The last field is a boolean value indicating whether or not the pair of patient records represented by the line was a match. # # **We could use this dataset to build a simple classifier that allows us to predict whether a record will be a match based on the values of the match scores for patient records.** # ### Caching # # Each time we process data (e.g., calling the `count` method), Spark re-opens the file, parses the rows, and then executes the action requested. # It does not matter if we have filtered the data and created a smaller set of records. Spark will execute all over again. Indeed, this is intended. Fault-tolerance would not be possible without this feature! # # We can use the `cache` method to indicate Spark to store the DataFrame in memory. help(df.cache) # **Do you remember the following question from our last class?" # # - Spark is in-memory only. Myth or misconception? # # **Can you spot something from above that helps you to answer the question?** df_cached = df.cache() # The contents of the DataFrame `df_cached` are going to be stored in memory the next time it's computed. df_cached.count() df_cached.take(10) # The `take` method from above is accessing the cached elements of `df_cached` instead of recomputing them from their dependencies (i.e., the "lineage"). # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # coding: utf-8 import pickle import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.feature_extraction import DictVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score # parameters C = 1.0 n_splits = 5 output_file = f'model_C={C}.bin' # data preparation df = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv') df.columns = df.columns.str.lower().str.replace(' ', '_') categorical_columns = list(df.dtypes[df.dtypes == 'object'].index) for c in categorical_columns: df[c] = df[c].str.lower().str.replace(' ', '_') df.totalcharges = pd.to_numeric(df.totalcharges, errors='coerce') df.totalcharges = df.totalcharges.fillna(0) df.churn = (df.churn == 'yes').astype(int) df_full_train, df_test = train_test_split(df, test_size=0.2, random_state=1) numerical = ['tenure', 'monthlycharges', 'totalcharges'] categorical = [ 'gender', 'seniorcitizen', 'partner', 'dependents', 'phoneservice', 'multiplelines', 'internetservice', 'onlinesecurity', 'onlinebackup', 'deviceprotection', 'techsupport', 'streamingtv', 'streamingmovies', 'contract', 'paperlessbilling', 'paymentmethod', ] # training def train(df_train, y_train, C=1.0): dicts = df_train[categorical + numerical].to_dict(orient='records') dv = DictVectorizer(sparse=False) X_train = dv.fit_transform(dicts) model = LogisticRegression(C=C, max_iter=1000) model.fit(X_train, y_train) return dv, model def predict(df, dv, model): dicts = df[categorical + numerical].to_dict(orient='records') X = dv.transform(dicts) y_pred = model.predict_proba(X)[:, 1] return y_pred # validation print(f'doing validation with C={C}') kfold = KFold(n_splits=n_splits, shuffle=True, random_state=1) scores = [] fold = 0 for train_idx, val_idx in kfold.split(df_full_train): df_train = 
df_full_train.iloc[train_idx] df_val = df_full_train.iloc[val_idx] y_train = df_train.churn.values y_val = df_val.churn.values dv, model = train(df_train, y_train, C=C) y_pred = predict(df_val, dv, model) auc = roc_auc_score(y_val, y_pred) scores.append(auc) print(f'auc on fold {fold} is {auc}') fold = fold + 1 print('validation results:') print('C=%s %.3f +- %.3f' % (C, np.mean(scores), np.std(scores))) # training the final model print('training the final model') dv, model = train(df_full_train, df_full_train.churn.values, C=1.0) y_pred = predict(df_test, dv, model) y_test = df_test.churn.values auc = roc_auc_score(y_test, y_pred) print(f'auc={auc}') # # Save the model # f_out = open(output_file, 'wb') # pickle.dump((dv, model), f_out) # f_out.close() # OR with autoclose: with open(output_file, 'wb') as f_out: pickle.dump((dv, model), f_out) print(f'the model is saved to {output_file}') # - customer = df[categorical+numerical].iloc[[0]].to_dict(orient='records') customer # + import pickle # sklearn required input_file = 'model_C=1.0.bin' with open(input_file, 'rb') as f_in: dv, model = pickle.load(f_in) print(f'the model is loaded from {output_file}') df_customer = pd.DataFrame.from_dict(customer) y_pred_customer = predict(df_customer, dv, model) y_pred_customer # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### - TITANIC # # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy.stats as stats from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler # - data = pd.read_csv('train.csv') data.head() print(str(data.shape)) # + #data['Sex'].unique() # - #Eliminamos el id de pasajero ya que solo es ruido en la data data.drop('PassengerId', axis=1, inplace=True) data.drop('Name', axis=1, inplace=True) print(str(data.shape)) # ### Analisis de Datos ## Analisis de la data data.describe() data.dtypes var_with_na = [var for var in data.columns if(data[var].isnull().sum() > 0)] len(var_with_na) data[var_with_na].isnull().mean().sort_values(ascending=False) data[var_with_na].isnull().mean().sort_values(ascending=False).plot.bar() plt.ylabel("Porcentaje de NAs") plt.axhline(y=0.9, color="r") plt.axhline(y=0.75, color="g") def na_impact_over_target(df, Xvar, Yvar): df = data.copy() df[Xvar] = np.where(df[Xvar].isnull(), 1, 0) temp = df.groupby(Xvar)[Yvar].agg(['mean', 'std']) temp.plot(kind="barh", y="mean", xerr="std", legend=False, title=Yvar) plt.show() for var in var_with_na: na_impact_over_target(data, var, 'Survived') # ### Transformacion de Variables X_train, X_test, y_train, y_test = train_test_split( data.drop(['Survived'], axis=1), data['Survived'], test_size=0.2, random_state=2021) X_train.shape, X_test.shape # ### Variables Categoricas cat_vars = [var for var in data.columns if data[var].dtype == 'O'] cat_vars = cat_vars + ['Pclass'] cat_vars X_train[cat_vars] = X_train[cat_vars].astype('O') X_test[cat_vars] = X_test[cat_vars].astype('O') cat_vars_with_na = [var for var in cat_vars if X_train[var].isnull().sum() > 0] # ## Variables Numericas num_vars = [var for var in X_train.columns if var not in cat_vars and var != ('Survived', 'Pclass')] num_vars # # Tratamiento de Variables con NA # ##### Variables Categoricas con NA cat_vars_with_na X_train[cat_vars_with_na].isnull().mean().sort_values(ascending = False) # Vemos que la variable "Cabin" 
muestra que más de la mitad de los registros están en blanco, por lo que podemos descartar dicha variable (además que es una variable categórica). # + #data.drop('Cabin', axis=1, inplace=True) # + #variables tratadas con etiqueta de faltante por cantidad masiva de faltantes. vars_with_missing_string = [var for var in cat_vars_with_na if X_train[var].isnull().mean() > 0.2] #variables tratadas con procedimiento por candiad adecuada de faltantes. vars_freq_category = [var for var in cat_vars_with_na if X_train[var].isnull().mean() <= 0.2] # - vars_with_missing_string vars_freq_category #Faltantes con etiqueta missing X_train[vars_with_missing_string] = X_train[vars_with_missing_string].fillna('Missing') X_test[vars_with_missing_string] = X_test[vars_with_missing_string].fillna('Missing') for var in vars_freq_category: mode = X_train[var].mode()[0] X_train[var].fillna(mode, inplace=True) X_test[var].fillna(mode, inplace=True) print(var, "-------", mode) X_train[cat_vars_with_na].isnull().mean().sort_values(ascending = False) cat_vars_with_na = [var for var in cat_vars if X_train[var].isnull().sum() > 0] cat_vars_with_na # #### Variables Numericas con NA ##Numéricas con faltantes num_vars_with_na = [var for var in num_vars if X_train[var].isnull().sum() > 0] X_train[num_vars_with_na].isnull().mean() for var in num_vars_with_na: mean_val = X_train[var].mean() print(var, mean_val) X_train[var].fillna(mean_val, inplace=True) X_test[var].fillna(mean_val, inplace=True) X_train[num_vars_with_na].isnull().mean() # ### Transformacion Variable de Precio #Aplicamos transformación logaritmica para el precio #for var in ['Fare']: # X_train[var] = np.log(X_train[var]) # X_test[var] = np.log(X_test[var]) # ### Codificacion Variables Categoricas # + sex_mapping = {'male':1, 'female':2} var = 'Sex' X_train[var] = X_train[var].map(sex_mapping) X_test[var] = X_test[var].map(sex_mapping) # - # ### Codificacion Otras Variables other_cat = ['Pclass'] + ['SibSp'] + ['Parch'] + ['Ticket'] + ['Embarked'] + ['Cabin'] #funcion para encontar etiquetas raras def find_rare_labels_freq(df, var, rare_perc): temp = df.groupby(var)[var].count()/len(df) return temp[temp > rare_perc].index for var in other_cat: freqValue = find_rare_labels_freq(X_train, var, 0.01) #print(var, '--------', freqValue) X_train[var] = np.where(X_train[var].isin(freqValue), X_train[var], 'Rare') X_test[var] = np.where(X_test[var].isin(freqValue), X_test[var], 'Rare') def replace_category_vals(train, test, y_train, var, target): temp = pd.concat([train, y_train], axis=1) order_labels = temp.groupby([var])[target].mean().sort_values().index ordinal_values = {k: i for i, k in enumerate(order_labels, 0)} print(var, ordinal_values) train[var] = train[var].map(ordinal_values) test[var] = test[var].map(ordinal_values) for var in other_cat: replace_category_vals(X_train, X_test, y_train, var, 'Survived') def analyse_other_cats(train, y_train, var): temp = pd.concat([train, y_train], axis=1) temp.groupby(var)['Survived'].median().plot.bar() plt.title(var) plt.ylabel('Survied') plt.show() for var in other_cat: analyse_other_cats(X_train, y_train, var) # # Construccion de Modelo # ### Feature Scaling # + scaler = MinMaxScaler() scaler.fit(X_train) X_train = pd.DataFrame( scaler.transform(X_train), columns=X_train.columns ) X_test = pd.DataFrame( scaler.transform(X_test), columns=X_test.columns ) # - # ## Seleccion de Variables from sklearn.linear_model import Lasso from sklearn.feature_selection import SelectFromModel # + #objeto selector de variables sel_ = 
SelectFromModel(Lasso(alpha=0.001, random_state=0)) sel_.fit(X_train, y_train) # - sel_.get_support().sum() selected_features = X_train.columns[(sel_.get_support())] selected_features # ### Entrenamiento del Modelo X_train = X_train[selected_features] X_test = X_test[selected_features] # + linear_model = Lasso(alpha=0.001, random_state=2022) linear_model.fit(X_train, y_train) # - # ### Predicciones from sklearn.metrics import mean_squared_error, r2_score preds_train = linear_model.predict(X_train) # + #Resultados del Train print("MSE de Entrenamiento: {}".format(mean_squared_error(np.exp(y_train), np.exp(preds_train)))) print("RMSE de Entrenamiento: {}".format(mean_squared_error(np.exp(y_train), np.exp(preds_train), squared=False))) print("R2 de Entrenamiento: {}".format(r2_score(np.exp(y_train), np.exp(preds_train)))) # - preds_test = linear_model.predict(X_test) # + #Resultados del Test print("MSE de Entrenamiento: {}".format(mean_squared_error(np.exp(y_test), np.exp(preds_test)))) print("RMSE de Entrenamiento: {}".format(mean_squared_error(np.exp(y_test), np.exp(preds_test), squared=False))) print("R2 de Entrenamiento: {}".format(r2_score(np.exp(y_test), np.exp(preds_test)))) # - # ## # + #import joblib # + #joblib.dump(linear_model, 'final_model/lasso_reg.joblib') # - # # PIPELINE # + from sklearn.pipeline import Pipeline from feature_engine.imputation import( AddMissingIndicator, MeanMedianImputer, CategoricalImputer ) from feature_engine.encoding import ( RareLabelEncoder, OrdinalEncoder ) from feature_engine.transformation import LogTransformer from feature_engine.selection import DropFeatures from feature_engine.wrappers import SklearnTransformerWrapper #import my_preprocessors as mypp # + ### Configuracion de Modelo Pipeline # - X_train.shape, X_test.shape # ### Configuración del Machine Learning Pipeline # + #Mapeos de variables categoricas SEX_MAPPINGS = {'male':1, 'female':2} #Variables numéricas con NA NUMERICAL_VARS_WITH_NA = ['Age'] DROP_FEATURES = ['PassengerId', 'Name'] #Variables seleccionadas según análisis de Lasso FEATURES = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Cabin', 'Embarked'] # - # + titanic_pipeline = Pipeline([ #3. Indicamos Faltante en variables numéricas para imputar ('missing_indicator', AddMissingIndicator(variables=NUMERICAL_VARS_WITH_NA)), #6. Drop de variables ('drop_features', DropFeatures(features_to_drop=DROP_FEATURES)), #=============== CODIFICACION DE VARIABLES CATEGORICAS ORDINALES ============== #('mapper_quality', Mapper( # variables=QUAL_VARS, mappings=QUAL_MAPPINGS)), #=========== SCALER ============== ('scaler', MinMaxScaler()), #=========== ENTRENAMIENTO DEL MODELO ============ ('Lasso', Lasso(alpha=0.01, random_state=2022)) ]) # - X_trainv3 = X_train[FEATURES] preds = titanic_pipeline.predict(X_test) mean_squared_error(np.exp(y_test), np.exp(preds), squared=False) import joblib #Guardamos pipeline joblib.dump(titanic_pipeline, 'titanic_pipeline.pkl') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: galactic-spin # language: python # name: galactic-spin # --- # + [markdown] tags=[] # ### Instructions # + [markdown] tags=[] # When running the notebook the first time, make sure to run all cells before making changes in the notebook. Hit Shift + Enter to run the selected cell or, in the top menu, click on: `Kernel` > `Restart Kernel and Run All Cells...` to rerun the whole notebook. 
If you make any changes in a cell, rerun that cell. # # If you make any changes in a coding cell, rerun the notebook by `Run` > `Run Selected Cell and All Below` # - # Importing Python libraries and dependencies for our calculations import warnings warnings.simplefilter(action='ignore', category=FutureWarning) # Ignore warnings warnings.simplefilter(action='ignore', category=UserWarning) # Ignore warnings import time # Timing the runtime of the notebook startTime = time.time() import matplotlib.image as mpimg # Matplotlib library for displaying images from matplotlib import pyplot as plt # Matplotlib library for plotting import numpy as np # Numpy library for basic calculations import scipy.integrate as si # For integration import scipy.optimize as so # For finding the root of the gamma function import scipy.special as ss # For the gamma function import sys; sys.path.append('python/'); import bulge_library as b # For parameters of the 3 galaxies # # Bulge Rotation Curve # ![A poster, reading: CHOOSE: EMPIRICAL OR THEORETICAL? Astrophysicists have developed models of the 'galactic bulge', the bright central component of spiral galaxies. Some of these models depend on empirical data sets as input, while more theoretical models only require empirically-derived parameters as input. Empirically-based models can be more accurate, however, purely-mathematical models can help us extrapolate, understand, and extend our knowledge beyond what can only be directly observed. When making your own bulge component, you'll get to choose your path: empirical or theoretical rotation curve models. CHOOSE: WHICH GALAXY? (End text) At the bottom of the poster, there are three labeled galaxy images. The first: NGC 5533, Constellation: BOOTES, DISTANCE: 177 MLY, SIZE: 50,000 LY. The second: NGC 891, CONSTELLATION: CAMELOPARDALIS, DISTANCE: 27.3 MLY, SIZE: 120,000 LY. The last: NGC 7814, CONSTELLATION: PEGASUS, DISTANCE: 40 MLY, SIZE: 80,000 LY.](images/bulge.png) # + tags=["parameters"] galaxy = "NGC891" # Change this value to select a galaxy out of the 3 choices: NGC5533, NGC891, NGC7814 # - # After choosing a galaxy, restart the notebook by selecting `Kernel` > `Restart Kernel and Run All Cells` in the top bar. # Writing the chosen galaxy name in a text file would allow us # to use the selection in libraries outside of this notebook f = open("python/galaxy_bulgeactivity.txt", "w") f.write(galaxy) f.close() f = open("python/galaxy_bulgeactivity.txt", "r") f.close() #NBVAL_IGNORE_OUTPUT #Because the filepath won't be the same across devices import importlib importlib.reload(b) # Reload library so the changes take effect # + [markdown] tags=[] # ### Introduction # - # __What is a bulge?__ #
A bulge is the bright central part of a spiral galaxy. Bulges are ellipsoidal in shape (squashed spheres, roughly speaking) and dense with stars, containing most of a spiral galaxy's visible matter. # ### Equations # Calculate the rotational velocity due to the stars in the bulge using the empirically derived bulge parameters. # # >__Gamma function__:
# \begin{equation} # \gamma(2n, b_n) = \frac{1}{2} \Gamma(2n) # \end{equation}
# \begin{equation} # r_e = b_n^n \, r_0 # \end{equation}
# >__Central surface brightness in terms of luminosity__:
# \begin{equation} # I_0 = L_b \frac{(b_n)^{2n}}{2\pi n \ r^2_e \ \Gamma(2n)} # \end{equation}
#
# >where:
# $I_0$ = central surface brightness
# $L_b$ = luminosity of bulge
# $n$ = concentration parameter
# $r_0$ = characteristic radius (related to $r_e$)
# $r_e$ = effective radius
# # >__Rotational Velocity__:
# # \begin{equation} # V_{B}^{2}(r) = C \int_{m=0}^{r} \left[\int_{\kappa=m}^{\infty} \frac {e^{-\left(\frac{\kappa}{r_0}\right)^{\frac{1}{n}}} \left(\frac{\kappa}{r_0}\right)^{\frac{1}{n}-1}} {\sqrt{\kappa^2-m^2}} d\kappa\right] \times \frac{m^{2}}{\sqrt{r^{2}-m^{2}e^{2}}} dm # \end{equation}
# >where:
# \begin{equation} # C = \frac{4Gq\Upsilon_b I_0}{r_0 n} \sqrt{\sin^{2}i + \frac{1}{q^{2}} \cos^{2}i} # \end{equation}
# \begin{equation} # e = \sqrt{1-q^{2}} # \end{equation}
# $\Upsilon_b$ = mass-to-light ratio
# $e$ = eccentricity of the bulge
# $q$ = intrinsic axis ratio
# $i$ = inclination angle
# $G$ = gravitational constant
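# For concreteness, here is a minimal sketch (not part of the original activity) of how the relations above translate into code, using the `scipy` modules imported at the top of this notebook: $b_n$ is the root of the gamma-function condition, $r_0$ follows from $r_e$, and $I_0$ follows from $L_b$. The same steps appear inside the `v_bulge` function defined later on; the values of $n$, $r_e$ and $L_b$ below are arbitrary placeholders.

# +
# Illustrative sketch with placeholder inputs (n_ex, re_ex, Lb_ex are arbitrary example values)
n_ex, re_ex, Lb_ex = 2.0, 1.5, 1.0e10  # concentration parameter, effective radius [kpc], luminosity [Lsun]

# b_n solves gamma(2n, b_n) = 0.5 * Gamma(2n); ss.gammainc is the regularized lower incomplete gamma
bn_ex = so.brentq(lambda x: ss.gammainc(2 * n_ex, x) - 0.5, 0, 500000)

r0_ex = re_ex / bn_ex**n_ex  # characteristic radius from r_e = b_n^n * r_0
I0_ex = Lb_ex * bn_ex**(2 * n_ex) / (2 * np.pi * n_ex * re_ex**2 * ss.gamma(2 * n_ex))  # central surface brightness
print(bn_ex, r0_ex, I0_ex)
# -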
# ### Definitions of necessary bulge parameters # Concentration parameter ($n$) [unitless]: also known as the bulge Sérsic parameter, which _"describes the curvature of the profile in a radius-magnitude plot"_ (Noordermeer 2008).
# # Intrinsic axis ratio ($q$) [unitless]: _"q [is] the intrinsic axis ratio of the bulge isodensity surfaces"_ (Noordermeer 2008). This parameter describes how flattened the bulge is. The eccentricity of the bulge can be calculated from this parameter: $e = \sqrt{1 - q^{2}}$. #
# # Inclination angle ($i$) [degrees]: the tilt of the galaxy's disc relative to the plane of the sky (0° for a face-on galaxy, 90° for an edge-on one). # # Effective radius ($r_e$) [kpc]: _"the radius which encompasses 50 percent of the light"_ within the bulge (Noordermeer 2008).
# # Luminosity ($L_b$) [Lsun]: total luminosity of the bulge of the galaxy, in units of solar luminosity.
# # Mass-to-light ratio ($\Upsilon_b$) [unitless]: the ratio of the bulge's mass to the light it emits. # # Distance ($D$) [Mpc]: the distance to the galaxy, which is needed when converting angular size to physical size. # ### Literature search # There are **five parameters** you will need to find to calculate the velocity of the bulge of your chosen galaxy: the concentration parameter ($n$), intrinsic axis ratio ($q$), inclination angle ($i$), effective radius ($r_e$), and luminosity of the bulge ($L_b$). In addition, you may also need to find the distance to the galaxy ($D$). #
Activity 1)
# # >Do some research to find these parameters for any of the three galaxies (NGC 5533, NGC 891, NGC 7814) of your choice. This activity is intended to give you a glimpse into searching the scholarly literature. One place to start is by searching for your chosen galaxy at https://ui.adsabs.harvard.edu. You can also choose an entirely different galaxy and calculate the velocity of stars in its bulge. Make sure you have the right units for each parameter. # #### Concentration parameter [unitless] #n = ? n = b.n # Import # #### Intrinsic axis ratio [unitless] #q = ? q = b.q # #### Inclination angle [degrees] # Inclination angle is usually given in degrees. We need to convert this to radians. # + #i_degrees = ? i_degrees = b.i # Equation to convert degrees to radians i_rad = i_degrees * (np.pi/180) # - # #### Effective radius [kpc] #re_kpc = ? re_kpc = b.re_kpc # The effective radius might be given in units of arcseconds. An arcsecond is a unit of angle that describes how large an object appears on the sky. In this case, you can use trigonometry to calculate the physical size. To convert from arcseconds to kiloparsecs, you will need to find the distance to your galaxy.
# If the effective radius is already in the right units (in kpc), comment out the next `Code` cell. # # Using a right triangle and a small angle approximation, convert arcseconds to kpc in the following way: # # >__Getting the right units__:
# \begin{equation} # 1 \rm \ arcsec = \frac{1^\circ}{3600} = \frac{\pi}{180^\circ} \times \frac{1^\circ}{3600} = \frac{\pi}{648000} \ radians # \end{equation} #
# \begin{equation} # 1 \rm \ Mpc = 1000 \rm \ kpc # \end{equation}
# >__Arcsec to kpc__:
# \begin{equation} # \tan (\theta) \approx \theta \approx \frac{S}{D} # \end{equation} #
# \begin{equation} # S \rm \ (in \ kpc) \approx \theta \ (in \ rad) \times D \ (in \ kpc) # \end{equation} #
# where:
# $\theta$ = angle subtended by the object on the sky
# $S$ = actual size of the object
# $D$ = distance to the object
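# As a quick sanity check of these formulas (illustrative only; the 10 Mpc distance below is an arbitrary example, not a value from the literature), an object that subtends 1 arcsec at 10 Mpc has a physical size of roughly 0.05 kpc:

# +
# Illustrative example: 1 arcsec at an assumed distance of 10 Mpc
example_arcsec = 1.0
example_D_Mpc = 10.0                                        # arbitrary example distance
example_theta_rad = example_arcsec * (np.pi / 648000)       # arcsec to radians
example_S_kpc = example_theta_rad * (example_D_Mpc * 1000)  # small-angle approximation: S = theta * D
print('1 arcsec at 10 Mpc corresponds to {:.3f} kpc'.format(example_S_kpc))
# -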
# + ### Converting the effective radius in arcsec to kpc (if needed). ### If the conversion is not needed, comment out this section. try: # Parameters found #re_arcsec = ? #D_Mpc = ? re_arcsec = b.re_arcsec D_Mpc = b.D_Mpc re_rad = re_arcsec * (np.pi / (3600 * 180)) # arcsec to radians D_kpc = D_Mpc * 1000 # Mpc to kpc re_kpc = re_rad * D_kpc # Print value except AttributeError: re_kpc = b.re_kpc print('The effective radius of ' + galaxy + ' is '+"{:.2f}".format(re_kpc)+' kpc') # - # #### Luminosity of the bulge #L = ? Lb = b.Lb # In some cases, it is easier to find the absolute magnitude of the bulge rather than the luminosity. For example: The (R-band) absolute magnitude of the bulge ($M_b$) for NGC 5533 is listed in Source #2 (Noordermeer and Van Der Hulst, 2007), in Table A4 (Appendix) as -21.66. # # Using the zero-point luminosity equation, calculate the luminosity of the bulge (in Watts): # # >__Total luminosity of the bulge__:
# \begin{equation} # L_b = L_0 \times 10^{-0.4 M_b} # \end{equation} #
# >where:
# $L_0 = 3.0128 \times 10^{28} W$ = zero-point luminosity
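# As a quick numerical check of this equation (illustrative only), the NGC 5533 value $M_b = -21.66$ quoted above gives $L_b \approx 1.4 \times 10^{37}$ W, or about $3.6 \times 10^{10}$ Lsun:

# +
# Worked check using the NGC 5533 absolute magnitude quoted above (illustrative only)
Mb_example = -21.66
L0_W = 3.0128e28    # zero-point luminosity [W], same value as used below
Lsun_W = 3.838e26   # solar luminosity [W], same value as used below
Lb_example_W = L0_W * 10**(-0.4 * Mb_example)
print('{:.2e} W = {:.2e} Lsun'.format(Lb_example_W, Lb_example_W / Lsun_W))
# -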
#Mabs = ? Mabs = b.Mabs # + ### Converting absolute magnitude to luminosity (if needed). ### If the conversion is not needed, comment out this section. # Zero point luminosity in Watts L0 = 3.0128e28 # Solar luminosity in Watts Lsun = 3.838e26 # Equation to find the luminosity in Watts L_Watts = L0 * 10**(-0.4 * Mabs) # Convert Watts to Solar luminosity (assuming the solar luminosity is a constant) # Convert units (Watts to Lsun) Lb = L_Watts / Lsun print("The total luminosity of the bulge is {:.3e} Lsun.".format(Lb)) # - # #### Mass-to-light ratio # For simplicity, we are assuming a mass-to-light ratio of 1. However, this value can be found by fitting the total curve (that contains the bulge curve) to the measured values. You may also find this value during your literature search. ML = 1 # Mass-to-light ratio of bulge [unitless] # ### Calculation # Calculate the velocity of the bulge using the equations discussed above. def v_bulge(r,L=Lb,re=re_kpc,n=n,q=q,i=i_rad,ML=ML): # Constant G = 4.300e-6 # Gravitational constant, [kpc*(km/s)**2/Msun] # Gamma function gammafunction = lambda x: ss.gammainc(2*n,x) * ss.gamma(2*n) - 0.5 * ss.gamma(2*n) bn = so.brentq(gammafunction,0,500000, rtol=0.000001,maxiter=100) # Comes within 1% of exact root within 100 iterations # Calculating I0 I0 = L * bn**(2*n) / (2 * np.pi * n * re**2 * ss.gamma(2*n)) # Central surface brightness # Calculating r0 r0 = re / (bn**n) # Characteristic radius (in kpc) # Integrand 1 integrand1 = lambda x,m: np.exp(-np.power(x/r0,(1/n))) * np.power(x/r0,(1/n)-1) / (np.sqrt(x**2 - m**2)) # Integrate integrand 1 f_integrate1 = lambda m: si.quad(integrand1, m, np.inf, args=(m,))[0] f_vect1 = np.vectorize(f_integrate1) # Define C (constant) C = ((4 * G * q * ML * I0) / (r0 * n)) * (np.sqrt((np.sin(i_rad)**2) + (1/(q**2)) * (np.cos(i_rad)**2))) # Define eccentricity eccentricity = np.sqrt(1 - (q**2)) # Integrand 2 f_integrate2 = lambda m,r: C * f_integrate1(m) * (m**2) / (np.sqrt((r**2) - ((m**2)*(eccentricity**2)))) # Integrate outer function to get velocity squared vsq = lambda r: si.quad(f_integrate2, 0, r, args=(r,))[0] vsqv = np.vectorize(vsq) # Calculate velocity velocity = lambda r: np.sqrt(vsqv(r)) return velocity(r) # Define radius for plot radius = np.linspace(0.01,20,100) # ### Plot the velocity curve for the bulge # Note that this cell will probably take a few minutes to run. Please be patient. plt.figure(figsize=(11.0,8.0)) # Size of the plot plt.plot(radius, v_bulge(radius), linestyle='solid') # x and y values of the plot plt.title(str('Bulge Rotation Curve of ' + galaxy), fontsize=18) # Title of the plot plt.xlabel('Radius (kpc)', fontsize=14) # Labeling the x-axis plt.ylabel('Velocity (km/s)', fontsize=14) # Labeling the y-axis plt.ylim(0,350) # Changing the limit of the y-axis plt.show() #
Question 1)
# # >Does the bulge have any effect on the rotation of the stars at 15 kpc? # #
Question 2)
# # >What effect does decreasing the concentration parameter, $n$, have on the rotation curve? # #
Question 3)
# # >How does the amplitude of the curve change when increasing the mass-to-light ratio (ML or $\Upsilon_b$) of the bulge? # ### References # >1. Noordermeer, E. (October 22, 2008). **The Rotation Curves of Flattened Sérsic Bulges.** _Monthly Notices of the Royal Astronomical Society_ 385, no. 3: 1359–64. https://doi.org/10.1111/j.1365-2966.2008.12837.x.

# >2. Noordermeer, E., and van der Hulst, J. M. **The Stellar Mass Distribution in Early-Type Disc Galaxies: Surface Photometry and Bulge–Disc Decompositions.** _Monthly Notices of the Royal Astronomical Society_ 376, no. 4 (April 2007): 1480–1512. https://doi.org/10.1111/j.1365-2966.2007.11532.x.

# >3. Fraternali, F., Sancisi, R., and Kamphuis, P. **A tale of two galaxies: light and mass in NGC 891 and NGC 7814.** _Astronomy & Astrophysics_. 2011;531:A64. https://doi.org/10.1051/0004-6361/201116634.

# >4. Mamajek, E. E., et al. (October, 2015). **IAU 2015 Resolution B2 on Recommended Zero Points for the Absolute and Apparent Bolometric Magnitude Scales.** eprint arXiv:1510.06262. https://ui.adsabs.harvard.edu/abs/2015arXiv151006262M/abstract

# >5. Carroll, B. W.; Ostlie, D. A. (2006). **An Introduction to Modern Astrophysics (2nd ed.).** Pearson. pp. 60–62. ISBN 978-0-321-44284-0.

# >6. . (2001). **An investigation into the prominence of spiral galaxy bulges.** The Astronomical Journal, 121(2), 820–840. https://doi.org/10.1086/318767 #NBVAL_IGNORE_OUTPUT #Because the timing won't be exactly the same each time. executionTime = (time.time() - startTime) print(f'Execution time: {executionTime:.2f} seconds') # ### Literature search check # Did you research and locate the parameter values for your chosen galaxy? Click the 'Reveal!' button below to check your work! The results appear in the `Log Console` (select `View`--> `Show Log Console`). If you got any values wrong, you could refer to the citation by each value to see exactly which paper and table the value can be found in. from IPython.display import display display(b.button) b.button.on_click(b.on_button_clicked) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys from pathlib import Path import matplotlib.pyplot as plt import networkx as nx sys.path.append(str(Path(os.getcwd()).parent)) from nxempowerment.empowerment import graph_node_empowerment from nxempowerment.visualize import plot_graph_with_measure # - # # Network X graphs # There are a number of ways of creating a NetworkX Directed Graph # See [this tutorial](https://networkx.org/documentation/stable/tutorial.html) for more info. # # Nodes can be identified by any object. In this first example we use strings as node identifiers. # # So that NetworkX can plot the graph nicely we supply a (x,y) position for each node as an attribute named 'pos'. # # nodes supplied as a list of tuples of (str, dict) where dict is the attributes for that node nodes = [('a', {'pos': (0, 0)}), ('b', {'pos': (1, 0)}), ('c', {'pos': (2, 0)})] # edges are defined as a list of tuples (n1, n2) where (n1, n2) means an edge between node n1 and node n2 edges = [('a', 'b'), ('b', 'c')] graph = nx.DiGraph() graph.add_nodes_from(nodes) graph.add_edges_from(edges) node_empowerment = graph_node_empowerment(graph, 2) nx.set_node_attributes(graph, node_empowerment, 'empowerment_2_steps') # set the figsize on the graph attributes dict graph.graph['figsize'] = (5,1) plot_graph_with_measure(graph, 'empowerment_2_steps', arrows=True) # Note that because this is a directed graph, from node a in 2 steps the agent can reach node b or node c so empowerement at node a is log_2(2) = 1. From node b the agent can only reach node c so empowerment is log_2(1) = 0. From node c the agent cannot move at all so empowerment is also 0. # # + # Alternatively we can use the (x,y) pos also to be the node id, which is how the GridWorld class works. # NetworkX also allows us to define nodes and edges in one step by passing an Iterable of edges into the constructor. 
graph = nx.DiGraph([((0,0), (1,0)), ((1,0), (2,0))]) # Now we must add the 'pos' attributes nx.set_node_attributes(graph, {n: n for n in graph.nodes}, 'pos') node_empowerment = graph_node_empowerment(graph, 2) nx.set_node_attributes(graph, node_empowerment, 'empowerment_2_steps') # set the figsize on the graph attributes dict graph.graph['figsize'] = (5,1) plot_graph_with_measure(graph, 'empowerment_2_steps', arrows=True) # - # If we connect edges in both directions between each node then the agent can reach 2 other nodes in 2 steps from each node # + graph = nx.DiGraph([((0,0), (1,0)), ((1,0), (0,0)), ((1,0), (2,0)), ((2,0), (1,0))]) # Now we must add the 'pos' attributes nx.set_node_attributes(graph, {n: n for n in graph.nodes}, 'pos') node_empowerment = graph_node_empowerment(graph, 2) nx.set_node_attributes(graph, node_empowerment, 'empowerment_2_steps') # set the figsize on the graph attributes dict graph.graph['figsize'] = (5,1) plot_graph_with_measure(graph, 'empowerment_2_steps', arrows=True) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### - IMPORTING THE NEEDED PYTHON MODULES AND DATA SETS INTO THE WORKSPACE # + import pandas as pd import numpy as np from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV train = pd.read_csv('train_copy.csv') test = pd.read_csv('test_copy.csv') test_ID = pd.read_csv('test.csv') print(train.shape) print(test.shape) # - # #### - SPLITTING THE DATASET INTO X AND y FOR TRAINING OUR ML ALGORITHMS # + # WE ARE DROPPING THE 'target' pd series(column) FROM X X = train.drop('target', axis=1) y = train['target'] ID = test_ID['ward'] print("Shape of X", X.shape) print("Shape of y", y.shape) print("Shape of ID", ID.shape) # - # #### - IMPORTING OUR MODELS (ML ALGORITHMS) I WANT TO TRAIN WITH THE DATASET from xgboost import XGBRegressor from catboost import CatBoostRegressor from mlxtend.regressor import StackingRegressor # #### -LETS IMPLEMENT SOME FORM OF STACKING USING THE 'StackingRegressor' ALGORITHM # + # CREATING THE ML ALGORITHMS WE WANT TO STACK TOGETHER CAT1 = CatBoostRegressor(metric_period=50) CAT2 = CatBoostRegressor(metric_period=20, eval_metric='RMSE') # CREATING A XGB ALGORITM TO SERVE AS OUT META-REGRESSOR (it aggregates prediction from the stacked models) XG = XGBRegressor() # CREATING PUT STACKED MODEL USING "StackingRegressor" model = StackingRegressor(regressors= [CAT1, CAT2], meta_regressor= XG) # - # #### - TO MAKE THIS FANCY, I DEFINE A FUNCTION TO OUTPUT OUR PREDICTED VALUES INTO A CSV FILE def output_csv(file_name, algorithm = model, X_data = X, y_data= y, ID_column = ID, test_data = test): algorithm.fit(X_data, y_data) pred = algorithm.predict(test_data) output = pd.DataFrame({'ward' : ID_column, 'target' : pred}) output.to_csv(file_name, index=False) # #### - OKAY LET US GENERATE SOME PREDICTIONS # NOTE WE ONLY SUPPLIED ONE ARGUMENT...BECAUSE THE OTHER ARGUMENTS HAVE DEFAULT VALUES ATTACHED TO THEM output_csv("Stacking_Prediction.csv") # ## - THAT ALL FOR NOW # ## - FEEL FREE TO MAKE PULL REQUESTS, FORK, CLONE AND DOWNLOAD THIS REPO AND IMPROVE ON THIS MEAGRE MODEL OF MINE...vielen dank (THANK YOU!!!) 
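# #### - OPTIONAL: CROSS-VALIDATING THE STACKED MODEL
# A supplementary sketch, not part of the original notebook: `cross_val_score` and `KFold` were imported above but never used. Assuming `X`, `y` and `model` are defined exactly as above, this is one way to estimate the stacked model's RMSE before generating predictions.

# +
# OPTIONAL: 5-FOLD CROSS-VALIDATION OF THE STACKED MODEL (CAN BE SLOW BECAUSE OF CATBOOST)
cv = KFold(n_splits=5, shuffle=True, random_state=42)
cv_scores = cross_val_score(model, X, y, scoring='neg_root_mean_squared_error', cv=cv)
print("CV RMSE: %.4f +- %.4f" % (-cv_scores.mean(), cv_scores.std()))
# -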
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Observations and Insights # ### 1. There is a positive correlation between the mouse weight and the average tumor volume. # # ### 2. The Drug Regimen “Capomulin” has the maximum mice number followed by the drug Regimen “Ramicane”. The Drug Regimen “Propriva” has the smallest number of mice. There is a total number of 249 mice. # # # ### 3. There are more male mice than female. There is 51% of male mice and there is 49% of female mice. # # %matplotlib notebook import matplotlib.pyplot as plt import numpy as np import pandas as pd # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st from scipy.stats import linregress # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset combined = pd.merge(mouse_metadata, study_results, on=('Mouse ID')) # Display the data table for preview combined.head() # - # Checking the number of mice. mice = len(combined['Mouse ID'].value_counts()) print(mice) # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. duplicate_mice = combined.loc[combined.duplicated(subset=['Mouse ID', 'Timepoint',]),'Mouse ID'].unique() print(duplicate_mice) # Optional: Get all the data for the duplicate mouse ID. duplicate_mice_data = combined.loc[combined['Mouse ID'] == 'g989'] duplicate_mice_data.head(20) combined.rename(columns = {'Mouse ID':'Mouse_ID'}, inplace = True) # Create a clean DataFrame by dropping the duplicate mouse by its ID. clean = combined[combined.Mouse_ID != 'g989'] clean.head() clean.to_csv("clean.csv") # Checking the number of mice in the clean DataFrame. mice_number = len(clean['Mouse_ID'].value_counts()) print(mice_number) # ## Summary Statistics # + # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Use groupby and summary statistical methods to calculate the following properties of each drug regimen: # mean, median, variance, standard deviation, and SEM of the tumor volume. # Assemble the resulting series into a single summary dataframe. 
mean = clean.groupby(['Drug Regimen']).mean()['Tumor Volume (mm3)'] mean_df = mean.to_frame() mean_df.rename(columns = {'Tumor Volume (mm3)':'Mean'}, inplace = True) median = clean.groupby(['Drug Regimen']).median()['Tumor Volume (mm3)'] median_df = median.to_frame() median_df.rename(columns = {'Tumor Volume (mm3)':'Median'}, inplace = True) variance = clean.groupby(['Drug Regimen']).var()['Tumor Volume (mm3)'] variance_df = variance.to_frame() variance_df.rename(columns = {'Tumor Volume (mm3)':'Variance'}, inplace = True) standard_deviation = clean.groupby(['Drug Regimen']).std()['Tumor Volume (mm3)'] standard_deviation_df = standard_deviation.to_frame() standard_deviation_df.rename(columns = {'Tumor Volume (mm3)':'Standard Deviation'}, inplace = True) SEM = clean.groupby(['Drug Regimen']).sem()['Tumor Volume (mm3)'] SEM_df = SEM.to_frame() SEM_df.rename(columns = {'Tumor Volume (mm3)':'SEM'}, inplace = True) # Merging DataFrames merge1 = pd.merge(mean_df, median_df, on="Drug Regimen") merge2 = pd.merge(merge1, variance_df, on="Drug Regimen") merge3 = pd.merge(merge2, standard_deviation_df, on="Drug Regimen") summary_table = pd.merge(merge3, SEM_df, on="Drug Regimen") summary_table.head() # + # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Using the aggregation method, produce the same summary statistics in a single line clean.groupby('Drug Regimen').aggregate(['mean', 'median', 'var', 'std', 'sem'])['Tumor Volume (mm3)'] # - # ## Bar and Pie Charts # + # Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas. # Grouping the data by DataFrame by Drug Regimen and calculating the total number of mice using the count function # Creating a DataFrame using the results # Using DataFrame.plot() in order to create a bar chart of the data # Setting a title for the chart # Setting labels mice_drug_regimen = clean.groupby("Drug Regimen").count()["Mouse_ID"] mice_drug_regimen.plot(kind="bar", figsize = (10,5)) plt.title("Total number of Mice per Drug Regimen") plt.xlabel("Drug Regimen") plt.ylabel("Number of Mice") # Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.plt.show() # + # Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot. 
# Renaming the Drug Regimen column in order to use it later in the "unique" function # Getting the unique values in the Drug Regimen column # Converting the data into a list # Printing the list clean.rename(columns = {'Drug Regimen':'Drug_Regimen'}, inplace = True) drug_regimen = clean.Drug_Regimen.unique() drug_regimen_list = drug_regimen.tolist() drug_regimen_list mice_drug_regimen_list = mice_drug_regimen.tolist() mice_drug_regimen_list fig1, ax1 = plt.subplots(figsize=(10, 5)) x_axis = np.arange(len(mice_drug_regimen)) plt.bar(x_axis, mice_drug_regimen_list, color='b', alpha=0.8, align='center') tick_locations = [value for value in x_axis] plt.xticks(tick_locations, drug_regimen_list) # - gender = clean.Sex.unique() gender_list = gender.tolist() gender_list # + # Generate a pie plot showing the distribution of female versus male mice using pandas # Calculating the total number of mice # Calculating the total number of mice by gender # Calculating the percentage of the total number of mice by gender # Putting results into a DataFrame # Renaming the "Mouse_ID" column total_mice = clean['Mouse_ID'].count() mice_sex = clean.groupby("Sex").count()["Mouse_ID"] mice_sex_per = (mice_sex / total_mice) * 100 mice_sex_per = mice_sex_per.to_frame() mice_sex_per.rename(columns = {'Mouse_ID':'Total_Mice_Gender %'}, inplace = True) mice_sex_per.head() # Creating Pie chart colors = ['yellow','green'] explode = (0.1,0) plot = mice_sex_per.plot.pie(y='Total_Mice_Gender %',figsize=(10,5), colors = colors, startangle=140, explode = explode, shadow = True, autopct="%1.1f%%") plt.axis("equal") plt.title("Distribution of Female vs Male mice") plt.show() # + # Generate a pie plot showing the distribution of female versus male mice using pyplot # Getting the unique values in the "Sex" column # Converting the results into a list to be used when charting gender = clean.Sex.unique() gender_list = gender.tolist() gender_list # Calculating the total number of mice # Calculating the total number of mice by gender # Calculating the percentage of the total number of mice by gender # Putting results into a DataFrame # Renaming the "Mouse_ID" column # Converting the new column to a list to be used when charting total_mice = clean['Mouse_ID'].count() mice_sex = clean.groupby("Sex").count()["Mouse_ID"] mice_sex_per = (mice_sex / total_mice) * 100 mice_sex_per2 = mice_sex_per.to_frame() mice_sex_per2.rename(columns = {'Mouse_ID':'Total_Mice_Gender'}, inplace = True) mice_sex_list = mice_sex_per2['Total_Mice_Gender'].tolist() # Telling matplotlib to create a pie chart based upon the above data colors = ['yellow','green'] explode = (0.1,0) fig1, ax1 = plt.subplots(figsize=(10, 5)) plt.pie(mice_sex_per, explode=explode, labels = gender_list, colors=colors, autopct="%1.1f%%", shadow=True, startangle=140) plt.axis("equal") plt.title("Distribution of Female vs Male mice") plt.show() # - # ## Quartiles, Outliers and Boxplots clean.head() # + # Calculate the final tumor volume of each mouse across four of the treatment regimens: # Capomulin, Ramicane, Infubinol, and Ceftamin # Start by getting the last (greatest) timepoint for each mouse # Merge this group df with the original dataframe to get the tumor volume at the last timepoint # Finding the greatest value in the Timepoint column using the max function Capomulin = clean.loc[clean["Drug_Regimen"] == "Capomulin",:] Ramicane = clean.loc[clean["Drug_Regimen"] == "Ramicane", :] Infubinol = clean.loc[clean["Drug_Regimen"] == "Infubinol", :] Ceftamin = clean.loc[clean["Drug_Regimen"] 
== "Ceftamin", :] # Capomulin final_Capomulin = Capomulin.groupby('Mouse_ID').max()['Timepoint'] Capomulin_volume = pd.DataFrame(final_Capomulin) Capomulin_merged = pd.merge(Capomulin_volume, clean, on=("Mouse_ID","Timepoint"),how="left") Capomulin_merged.head() # + # Capomulin quartiles and IQR # Determining outliers using upper and lower bounds Capomulin_tum = Capomulin_merged["Tumor Volume (mm3)"] quartiles = Capomulin_tum.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq print(f"The lower quartile of Capomulin tumors {lowerq}") print(f"The upper quartile of Capomulin tumors {upperq}") print(f"The interquartile range of Capomulin tumors {iqr}") print(f"The median of Capomulin tumors: {quartiles[0.5]} ") lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Values below {lower_bound} could be outliers.") print(f"Values above {upper_bound} could be outliers.") # - # Ramicane final_Ramicane = Ramicane.groupby('Mouse_ID').max()['Timepoint'] Ramicane_volume = pd.DataFrame(final_Ramicane) Ramicane_merged = pd.merge(Ramicane_volume, clean, on=("Mouse_ID","Timepoint"),how="left") Ramicane_merged.head() # + # Ramicane quartiles and IQR # Determining outliers using upper and lower bounds Ramicane_tum = Ramicane_merged["Tumor Volume (mm3)"] quartiles = Ramicane_tum.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq print(f"The lower quartile of Ramicane tumors is: {lowerq}") print(f"The upper quartile of Ramicane tumors is: {upperq}") print(f"The interquartile range of Ramicane tumors is: {iqr}") print(f"The median of Ramicane tumors is: {quartiles[0.5]} ") lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Values below {lower_bound} could be outliers.") print(f"Values above {upper_bound} could be outliers.") # - # Infubinol quartiles and IQR # Determining outliers using upper and lower bounds final_Infubinol = Infubinol.groupby('Mouse_ID').max()['Timepoint'] Infubinol_volume = pd.DataFrame(final_Infubinol) Infubinol_merged = pd.merge(Infubinol_volume, clean, on=("Mouse_ID","Timepoint"),how="left") Infubinol_merged.head() # + # Infubinol quartiles and IQR # Determine outliers using upper and lower bounds Infubinol_tum = Infubinol_merged["Tumor Volume (mm3)"] quartiles = Infubinol_tum.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq print(f"The lower quartile of Infubinol tumors is {lowerq}") print(f"The upper quartile of Infubinol tumors is {upperq}") print(f"The interquartile range of Infubinol tumors is {iqr}") print(f"The median of Infubinol tumors is {quartiles[0.5]} ") lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Values below {lower_bound} could be outliers.") print(f"Values above {upper_bound} could be outliers.") # - # Ceftamin final_Ceftamin = Ceftamin.groupby('Mouse_ID').max()['Timepoint'] Ceftamin_volume = pd.DataFrame(final_Ceftamin) Ceftamin_merged = pd.merge(Ceftamin_volume, clean, on=("Mouse_ID","Timepoint"),how="left") Ceftamin_merged.head() # + # Ceftamin quartiles and IQR # Determining outliers using upper and lower bounds Ceftamin_tum = Ceftamin_merged["Tumor Volume (mm3)"] quartiles = Ceftamin_tum.quantile([.25,.5,.75]) lowerq = quartiles[0.25] upperq = quartiles[0.75] iqr = upperq-lowerq print(f"The lower quartile of treatment is {lowerq}") print(f"The upper quartile of temperatures is {upperq}") print(f"The interquartile range of temperatures is {iqr}") print(f"The the median of 
temperatures is {quartiles[0.5]} ") lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Values below {lower_bound} could be outliers.") print(f"Values above {upper_bound} could be outliers.") # + # Generating a box plot of the final tumor volume of each mouse across four regimens of interest box_plot = [Capomulin_tum, Ramicane_tum, Infubinol_tum, Ceftamin_tum] Regimen= ['Capomulin', 'Ramicane', 'Infubinol','Ceftamin'] fig1, ax1 = plt.subplots(figsize=(15, 10)) ax1.set_title('Final Tumor Volume',fontsize = 25) ax1.set_ylabel('Final Tumor Volume (mm3)',fontsize = 14) ax1.set_xlabel('Drug_Regimen',fontsize = 14) ax1.boxplot(box_plot, labels=Regimen, widths = 0.4, patch_artist=True,vert=True) plt.ylim(10, 80) plt.show() # - # Saving image plt.savefig("Images/box_plot.png", bbox_inches = "tight") # ## Line and Scatter Plots # Identifying a Capomulin "Mouse_ID" Capomulin['Mouse_ID'] # Generating a line plot of tumor volume vs. time point for a mouse treated with Capomulin line_df = Capomulin.loc[Capomulin["Mouse_ID"] == "s185",:] line_df.head() # + # Plotting x_axis = line_df["Timepoint"] tumor_size = line_df["Tumor Volume (mm3)"] fig1, ax1 = plt.subplots(figsize=(15, 10)) plt.title('Capomulin treatment of mouse s185',fontsize =25) plt.plot(x_axis, tumor_size,linewidth=2, markersize=15,marker="o",color="blue", label="Fahreneit") plt.xlabel('Timepoint (Days)',fontsize =14) plt.ylabel('Tumor Volume (mm3)',fontsize =14) plt.show() # - # Saving image plt.savefig("Images/line_graph.png", bbox_inches = "tight") # + # Generating a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen fig1, ax1 = plt.subplots(figsize=(15, 10)) average_vol_capomulin = Capomulin.groupby(['Mouse_ID']).mean() marker_size=15 plt.scatter(average_vol_capomulin['Weight (g)'],average_vol_capomulin['Tumor Volume (mm3)'],s=175, color="blue") plt.title('Mouse Weight vs. Average Tumor Volume',fontsize =25) plt.xlabel('Weight (g)',fontsize =14) plt.ylabel('Averag Tumor Volume (mm3)',fontsize =14) plt.show() # - plt.savefig("Images/scatterplot.png", bbox_inches = "tight") # ## Correlation and Regression # + # Calculatng the correlation coefficient and linear regression model # for mouse weight and average tumor volume for the Capomulin regimen correlation_coefficient = round(st.pearsonr(average_vol_capomulin['Weight (g)'],average_vol_capomulin['Tumor Volume (mm3)'])[0],2) print(f"The correlation between mouse weight and average tumor volume is {correlation_coefficient}") # + # Plotting the linear regression model on top of the previous scatter plot. x_values = average_vol_capomulin['Weight (g)'] y_values = average_vol_capomulin['Tumor Volume (mm3)'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regression_values = x_values * slope + intercept print(f"Slope:{slope}") print(f"Intercept:{intercept}") print(f"rvalue (Correlation coefficient) is {rvalue}") print(f"pandas (Correlation coefficient)is {correlation_coefficient}") print(f"stderr is {stderr}") line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) print(line_eq) # + # Adding the linear regression equation and line to plot fig1, ax1 = plt.subplots(figsize= (15, 10)) plt.scatter(x_values, y_values, s = 175, color="blue") plt.plot(x_values, regression_values, "r-") plt.title('Regression Plot of Mouse Weight vs. 
Average Tumor Volume',fontsize =20) plt.xlabel('Weight(g)',fontsize =14) plt.ylabel('Average Tumor Volume (mm3)',fontsize =14) ax1.annotate(line_eq, xy= (20, 40), xycoords ='data', xytext = (0.8, 0.95), textcoords = 'axes fraction', horizontalalignment = 'right', verticalalignment = 'top',fontsize = 30, color = "red") print(f"The r-squared is: {rvalue**2}") plt.show() # - # Saving image plt.savefig("Images/linear_regression.png", bbox_inches = "tight") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp radom_forest # - # # codecentric.AI Bootcamp - Random Forests # # ## Aufgaben # # Hier findet ihr eine Reihe von Übungsaufgaben zu Random Forests. # # Folge den Aufgaben und ergänze die ___ in den Code-Abschnitten. # # Die folgenden Pakete werden geladen: #hide from nbdev.showdoc import * # %matplotlib inline #export import numpy as np import pandas as pd import matplotlib.pyplot as plt # ### Aufgabe 1: Datensätze aus Scikit-learn laden # # Für die nachfolgenden Übungsaufgaben wollen wir zwei Datensätze verwenden, einen für Klassifikationsmodelle und einen anderen für Regressionsmodelle. # # - Importiere die Funktion zum Laden des `iris` Datensatzes. # - Importiere die Funktion zum Laden des `boston` Datensatzes. #export # Irisdaten für Klassifikation from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix # - Lade den `iris` Datensatz ohne weitere Argumente anzugeben und gucke dir die Beschreibung an. #export data_iris = load_iris() print(data_iris.DESCR) # - Lade den `iris` Datensatz so, dass du zwei einzelne Objekte zurück bekommst: einen für die Featurewerte und einen für die Antwortvariable. #export features_iris, target_iris = load_iris(return_X_y=True) # ### Aufgabe 2: Daten mit Scikit-learn in Trainings- und Testsets aufteilen # # Nun wollen wir die beiden Datensätze, die wir geladen haben, in Trainings- und Testsets aufteilen. # # - Importiere die entsprechene Funktion für das Aufteilen der Datensätze aus Scikit-learn. # # - Teile die Feature und Targets des `iris` Datensatzes in zwei Sets ein: 80% der Daten sollen in das Trainingsset, 20% in das Testset. # - Stratifiziere die Aufteilung anhand der Antwortvariablen. # + #export X_train_iris, X_test_iris, y_train_iris, y_test_iris = train_test_split( features_iris, target_iris, test_size=0.2, stratify=target_iris, random_state = 42) # - # ### Aufgabe 3: Klassifikation mit Random Forests in Scikit-learn # # Nun sind unsere `iris` Daten bereit für das Trainieren von Modellen. # # - Importiere den Random Forest Klassifikationsalgorithmus. # - Definiere den Random Forest Algorithmus mit den folgenden Hyperparametern: 200 Bäume mit maximaler Tiefe von 3. maximaler Anzahl in Betracht zu ziehender Feature von 3 und aktiviere Out-Of-Bag Sampling. # - Trainiere das so definierte Modell auf den `iris` Trainingsdaten. # + #export rf_model_class = RandomForestClassifier( n_estimators=200, max_depth=3, max_features=3, bootstrap=True, random_state = 42) rf_model_class.fit(X_train_iris, y_train_iris) # - # - Importiere die Funktion zur Erstellung von Kreuzmatrizen aus Scikit-learn. # - Nutze das Random Forest Klassifikationsmodell um Vorhersagen auf den Testdaten zu machen. 
# - Vergleiche die vorhergesagten Klassen mit den tatsächlichen Klassen des Testsets, indem du die Kreuzmatrix ausgeben lässt. # + #export y_pred_rf_class = rf_model_class.predict(X_test_iris) print(confusion_matrix(y_test_iris, y_pred_rf_class, labels=np.unique(y_test_iris))) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import gym import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import math import random # + env = gym.make('CartPole-v1') num_actions = env.action_space.n state_space_dimensions = env.observation_space.shape[0] print('Actions: {} -- State space dimensions: {}'.format(num_actions, state_space_dimensions)) # - class CartPoleDQN(nn.Module): def __init__(self): super(CartPoleDQN, self).__init__() self.fc1 = nn.Linear(state_space_dimensions, 256) self.fc2 = nn.Linear(256, 64) #self.fc3 = nn.Linear(128, 64) self.head = nn.Linear(64, num_actions) def forward(self, x): x = F.leaky_relu(self.fc1(x)) x = F.leaky_relu(self.fc2(x)) #x = F.leaky_relu(self.fc3(x)) x = F.leaky_relu(self.head(x)) return x class ReplayBuffer(): def __init__(self, capacity): self.capacity = capacity self.position = 0 self.states = [] self.actions = [] self.rewards = [] self.next_states = [] self.done = [] def push(self, state, action, reward, next_state, done): if len(self.states) < self.capacity: self.states.append(None) self.actions.append(None) self.rewards.append(None) self.next_states.append(None) self.done.append(None) self.states[self.position] = state self.actions[self.position] = action self.rewards[self.position] = reward self.next_states[self.position] = next_state self.done[self.position] = done self.position = (self.position + 1) % self.capacity def sample(self, batch_size): indices = np.random.choice(range(len(self.states)), size=batch_size) state_sample = [self.states[i] for i in indices] action_sample = [self.actions[i] for i in indices] reward_sample = [self.rewards[i] for i in indices] next_state_sample = [self.next_states[i] for i in indices] done_sample = [self.done[i] for i in indices] return state_sample, action_sample, reward_sample, next_state_sample, done_sample def __len__(self): return len(self.states) def select_action(state, env, model, epsilon): if random.random() > epsilon: with torch.no_grad(): return model(state).argmax().item() else: return env.action_space.sample() def update_epsilon(epsilon_start, epsilon_end, epsilon_steps, total_steps): return epsilon_end + (epsilon_start - epsilon_end) * math.exp(-1. 
* total_steps / epsilon_steps) # + def optimize_model(policy_net, target_net, optimizer, memory, batch_size, gamma): state_batch, action_batch, reward_batch, next_state_batch, done_batch = memory.sample(batch_size) state_batch = torch.tensor(state_batch).float().view((batch_size, -1)) action_batch = torch.tensor(action_batch, dtype=torch.int64).view((batch_size)) reward_batch = torch.tensor(reward_batch).float().view((batch_size)) non_final_next_states = torch.tensor([s for s in next_state_batch if s is not None]).float() non_final_mask = torch.tensor(list(map(lambda s: s is not None, next_state_batch)), dtype=torch.bool) state_action_values = policy_net(state_batch) state_action_values = state_action_values.gather(1, action_batch.reshape((batch_size, 1))) next_state_values = torch.zeros(batch_size) next_state_values[non_final_mask] = target_net(non_final_next_states).max(dim=1)[0].float().detach() expected_state_action_values = reward_batch + gamma * next_state_values loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() return loss def train_dqn(env, policy_net, target_net, optimizer, memory, target_update=10, batch_size=32, episodes=100, gamma=0.99, epsilon_start=0.9, epsilon_end=0.05, epsilon_steps=1000000): total_rewards = [] total_steps = 0 epsilon = epsilon_start for episode in range(episodes): done = False state = env.reset() total_rewards.append(0) loss = 0 while not done: env.render() state_tensor = torch.tensor(state).float() action = select_action(state_tensor, env, policy_net, epsilon) next_state, reward, done, _ = env.step(action) if done: next_state = None total_rewards[episode] += reward memory.push(state, action, reward, next_state, done) state = next_state if len(memory) >= batch_size: loss = optimize_model(policy_net, target_net, optimizer, memory, batch_size, gamma) if total_steps % target_update == 0: target_net.load_state_dict(policy_net.state_dict()) total_steps += 1 epsilon = update_epsilon(epsilon_start, epsilon_end, epsilon_steps, total_steps) print('{}/{} Total steps: {} Episode reward: {} Average reward: {} Loss: {} Epsilon: {}'.format(episode, episodes, total_steps, total_rewards[episode], np.mean(total_rewards), loss, epsilon)) # + env = gym.make('CartPole-v1') num_actions = env.action_space.n state_space_dimensions = env.observation_space.shape[0] target_net = CartPoleDQN() policy_net = CartPoleDQN() #policy_net.load_state_dict(target_net.state_dict()) optimizer = torch.optim.Adam(policy_net.parameters()) memory = ReplayBuffer(100000) try: train_dqn(env, policy_net, target_net, optimizer, memory, gamma=0.5, batch_size=128, episodes=1000, epsilon_steps=10000, epsilon_end=0.1) finally: env.close() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.9 64-bit # language: python # name: python3 # --- # # TIL 7 # # 오늘은 내용은 여러번 반복해서 이해하는 것이 좋아보임 # ##### 벡터와 직교분해 # - 내적 # - 투영 # - 투영을 이용해서 특정 벡터를 직교 분할 가능! # # - 직교행렬 # - 직교좌표계 # - 행렬의 열벡터 끼리 내적했을 때 0인 행렬 # - 직교 행렬의 열벡터를 노말라이즈 했을때 # - 정규 직교 행렬 # - 투영을 이용해서 직교 분할한 행렬을 구할 수 있음 # - 역행렬을 계산하지 않고 투영을 이용해서 해를 구할 수 있음! # - 독립된 기저벡터를 갖는 행렬이기때문에 병렬처리 가능 # # - 정규 직교 행렬 # - 내적만으로 해를 구 할 수 있음! # # - QR 분해 # - 일반적인 행렬A은 직교성을 갖지 않음. 
# - QR 분해는 직교분할과 연관성 # - A = QR , Q(직교행렬), R(상삼각행렬) # - 상삼각행렬은 해를 구하기가 쉬움 (ㄱ 행렬 ) # # - QR 분해 vs LU 분해 # - LU 병렬처리 X # - QR 분해는 메모리 사용이 많음 # # #### SVD , PCA # - SVD (특이값 분해) # - LU, QR 분해는 정방행렬 # - 특이값 분해는 일반적인 m x n 행렬의 분해 # - A = U(회전) D(스케일) V(회전) # # - PCA (주성분 분석) # - 데이터들 집합의 분포성을 분석 # - 데이터의 주성분으로 나누고 증폭함 # - # # # - 인공지능에서 차원을 축소 하는 행위는 근사값을 구하는 행위같음 # #### 벡터 공간과 최소 제곱법 # - 공간 # - 덧셈 연산에 닫힘 # - 스칼라 곱 연산에 닫힘 # # - 열공간 # - 행렬 A의 열벡터들에 대한 가능한 모든 **선형 조합의 결과**를 모은 집합 # # - 최소 제곱법 # - Ax = b (b가 풀리지 않는 경우!?) # - 어떻게든 풀 수 있는 형태로 변경하는 것! 신박한데? # - 최선을 다하는 것 # - b 라는 벡터를 열공간에 투영을해서 그 값을 이용해서 목표를 수정해서 값을 얻는다 # - Ax = b 는 불가능 Ax = projwB (bar_B) 가능 # - b - bar_B # - Ax = b => A_transpos A x_bar = A_transpos b # - x_bar = (A_transpos A)^-1 A_transpos b # - 선형 회귀 # - 2차원 공간의 m개의 정점이 있을 때 이 정점들을 설명 할 수 있는 직선을 구하는 문제 # # #### 통계학, 기본개념 # - 통계학 # - 데이터수집, 구성, 분석, 해석, 표현 # - 기술, 추측 # # - 개념 정의 # - 모집단(population) # - 개체나 사건의 집합 # - 모수(parameter) # - 모집단의 수치적인 특성 # - 표본(sample) # - 모집단에서 선택된 개체, 사건의 집합 # # - 도수(Frequency) # - 정의 # - 발생한 횟수 # - 표현 방법 # - 분포표 # - 막대그래프 # - 질적 자료(비율) # - 히스토그램 # - 양적 자료(숫자인 경우) # + import statistics b = [79, 54,1000] print(statistics.mean(b)) print(statistics.median(b)) # - # - 분산 # - 모분산 (N) # - 표본분산 (n-1) # # print(statistics.variance(b)) # + import numpy as np np.var(b) # - np.var(b, ddof=1) # - 범위 # - max - min 범위 # - 대충 이정도 값들을 갖고있구나 max(b) - min(b) # - 사분위수 # - 자료를 정렬 했을 때 1/4,1/2,3/4 위치에 있는 숫자 np.quantile(b,0.25) # - 사분위수 범위 # - Q3 - Q1 # - 평균보다 더 자세하게 값의 범위를 확인 가능 np.quantile(b,0.75) - np.quantile(b,0.25) # - z-score # - 어떤 값이 평균으로부터 표준푠차와 얼마나 떨어져있는지 의미 # # + import scipy.stats scipy.stats.zscore(b) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:miniconda3-metabolic] # language: python # name: conda-env-miniconda3-metabolic-py # --- # # Defining the Aerobic Safety Margin # %load_ext autoreload # %autoreload 2 # + from itertools import product import cmocean import intake import matplotlib.pyplot as plt import metabolic as mi import numpy as np import thermodyn import util import xarray as xr # - # ## Load World Ocean Atlas data # # Access `intake` catalog for remote data. catalog = intake.open_catalog("data/catalogs/woa2018-catalog.yml") catalog # ### Generate merged dataset # Loop over variables of interest; generate a combined dataset. # + ds_list = [] variables = dict( oxygen=dict(keep_vars=["o_an", "depth_bnds"]), temperature=dict(keep_vars=["t_an"]), salinity=dict(keep_vars=["s_an"]), ) for variable, info in variables.items(): ds_list.append( catalog[variable](time_code=0) .to_dask()[info["keep_vars"]] .isel(time=0, drop=True) .sel(depth=slice(0, 1000.0)) ) ds = xr.merge(ds_list) ds["pO2"] = thermodyn.compute_pO2( O2=ds.o_an, T=ds.t_an, S=ds.s_an, depth=xr.full_like(ds.t_an, fill_value=1.0) * ds.depth, gravimetric_units=True, ) ds["dz"] = ds.depth_bnds.diff("nbounds").squeeze() ds["area"] = util.compute_grid_area(ds) ds["volume"] = (ds.dz * ds.area).where(ds.t_an.notnull()) ds.volume.attrs["long_name"] = "Volume" ds.volume.attrs["units"] = "m^3" ds.lat.attrs["long_name"] = "Latitude" ds.lat.attrs["units"] = "°N" ds = ds.compute() ds # - # ### Peak and verify # # Make a few plots to ensure that every is as expected. 
ds.dz.plot(y='depth', yincrease=False, marker='o'); ds.volume.sel(lon=179.5).plot(yincrease=False); ds.pO2.sel(lon=179.5).plot(yincrease=False, vmax=25); ds.pO2.sel(depth=0.0).plot(); ds.pO2.sel(depth=1000.0).plot(); ds.t_an.sel(lon=179.5).plot(yincrease=False); ds.s_an.sel(lon=179.5).plot(yincrease=False); # ## Load trait space data # + curator = util.curator_local_assets() cat = curator.open_catalog() ds_trait_space = cat['trait-space-hires'].to_dask().load() ds_trait_space.trait_spc_active.plot(); # + [markdown] tags=[] # ## Define viable traits # # Loop over trait space and store an index array into geographic space for each viable trait. # + # %%time trait_kji_ndx = {} for v in ['trait_spc_active', 'trait_spc_resting']: # get trait space vars trait_space = ds_trait_space[v] hypoxic_tol, temp_sens = trait_space.dims # loop over trait space and lat bands, sum trait frequency trait_kji_ndx[v] = {} for i, A_parm in enumerate(trait_space[hypoxic_tol].values): trait_kji_ndx[v][i] = {} for j, Eo in enumerate(trait_space[temp_sens].values): # compute metabolic index Phi = mi.Phi(ds.pO2, ds.t_an, A_parm, Eo, dEodT=mi.dEodT_bar) (I,) = np.asarray((1 < Phi) & (Phi <= 2)).ravel().nonzero() trait_kji_ndx[v][i][j] = I # - # ## Volume of viable habitat in trait-space # + # %%time dso_vol = xr.Dataset() viable_trait_mask = np.zeros(ds.pO2.shape) for v in ['trait_spc_active', 'trait_spc_resting']: trait_space = ds_trait_space[v] metabolic_baseline = trait_space.attrs['metabolic_baseline'] hypoxic_tol, temp_sens = trait_space.dims vol = xr.full_like(trait_space, fill_value=0.0) for i, A_parm in enumerate(trait_space[hypoxic_tol].values): for j, Eo in enumerate(trait_space[temp_sens].values): K, J, I = np.unravel_index(trait_kji_ndx[v][i][j], ds.pO2.shape) viable_trait_mask[:] = 0.0 viable_trait_mask[K, J, I] = 1.0 # Phi = mi.Phi(ds.pO2, ds.t_an, Ac=A_parm, Eo=Eo, dEodT=mi.dEodT_bar) # viable_trait_mask = xr.where((1 < Phi) & (Phi <= 2), 1.0, 0.0) vol[i, j] += (ds.volume * viable_trait_mask).sum(['depth', 'lat', 'lon']) vol_percent = 100.0 * vol / ds.volume.sum() vol_percent.attrs['long_name'] = 'Ocean volume' vol_percent.attrs['units'] = '%' vol *= 1e-6 # convert to 10^6 m^3 vol.attrs['units'] = '10$^6$ m$^3$' name = f'vol_habitat_{metabolic_baseline}' dso_vol[name] = vol dso_vol[f'{name}_percent'] = vol_percent dso_vol # - dso_vol.vol_habitat_active.plot.contourf(levels=30); dso_vol.vol_habitat_resting.plot.contourf(levels=30); dso_vol.vol_habitat_active_percent.plot.contourf(levels=30); # + plt.contourf( dso_vol.vol_habitat_active_percent.Eo, 1 / dso_vol.vol_habitat_active_percent.Ac, dso_vol.vol_habitat_active_percent, levels=np.arange(0, 102.5, 2.5), ) cb = plt.colorbar() plt.xlabel(util.attrs_label(dso_vol.Eo.attrs)) inv_attrs = dict(**dso_vol.Ac.attrs) inv_attrs['long_name'] = 'Inverse of hypoxic tolerance' inv_attrs['units'] = 'kPa' plt.ylabel(util.attrs_label(inv_attrs)) cb.ax.set_ylabel(util.attrs_label(vol_percent.attrs)); # - # ## Examine trait frequency as a function of latitude ds.volume.sel(lat=0.5).isel(depth=slice(0, 1)).plot() # + # %%time # construct an ATmax distribution with pre-defined bins ATmax_bin_edge = np.arange(10, 64, 2) ATmax_bin_c = np.vstack((ATmax_bin_edge[:-1], ATmax_bin_edge[1:])).mean(axis=0) ATmax_bins = xr.DataArray( ATmax_bin_c, dims=('ATmax'), coords={'ATmax': ATmax_bin_c}, ) lat_var = xr.full_like(ds.pO2.isel(lon=0, depth=0, drop=True), fill_value=0.0) trait_wgt = xr.full_like(ds.pO2, fill_value=0.0) dsets = {} for v in ['trait_spc_resting', 'trait_spc_active']: 
# get trait space vars trait_space = ds_trait_space[v] metabolic_baseline = trait_space.attrs['metabolic_baseline'] assert metabolic_baseline in ['active', 'resting'] ATmax = ds_trait_space[f'ATmax_{metabolic_baseline}'] hypoxic_tol, temp_sens = trait_space.dims # initialize trait distributions dso = xr.Dataset() dso['ATmax_dist'] = ATmax_bins * lat_var dso[f'{hypoxic_tol}_dist'] = trait_space[hypoxic_tol] * lat_var dso[f'{temp_sens}_dist'] = trait_space[temp_sens] * lat_var # loop over trait space and lat bands, sum trait frequency for i, A_parm in enumerate(trait_space[hypoxic_tol].values): for j, Eo in enumerate(trait_space[temp_sens].values): if np.isnan(ATmax[i, j]): continue # compute metabolic index # Phi = mi.Phi(ds.pO2, ds.t_an, A_parm, Eo, dEodT=mi.dEodT_bar) # if metabolic_baseline == 'active': # trait_wgt = xr.where((1 < Phi) & (Phi <= 2), trait_space[i, j], 0.0) # else: # trait_wgt = xr.where(Phi > 1, trait_space[i, j], 0.0) K, J, I = np.unravel_index(trait_kji_ndx[v][i][j], ds.pO2.shape) # loop over latitude and accumulate the frequency ndx_bin = np.searchsorted(ATmax_bin_edge, ATmax[i, j], side='left') for kk, jj, ii in zip(K, J, I): freq_j = trait_space[i, j] # * ds.volume[kk, jj, ii] dso['ATmax_dist'][ndx_bin, jj] += freq_j dso[f'{hypoxic_tol}_dist'][i, jj] += freq_j dso[f'{temp_sens}_dist'][j, jj] += freq_j continue trait_wgt.data[:] = 0.0 trait_wgt.data[K, J, I] = trait_space[i, j] for j_lat in range(ds.sizes['lat']): freq_j = (trait_wgt.isel(lat=j_lat) * ds.volume.isel(lat=j_lat)).sum() dso['ATmax_dist'][ndx_bin, j_lat] += freq_j dso[f'{hypoxic_tol}_dist'][i, j_lat] += freq_j dso[f'{temp_sens}_dist'][j, j_lat] += freq_j # normalize so integral is 1.0 for key in dso.data_vars: dso[key] /= dso[key].sum() dsets[v] = dso dsets # - # ### Results # # Make plots of trait distributions as a function of latitude def percentile(dso, var, coord, q): nj = dso.sizes['lat'] cdf = dso[var].cumsum(dim=coord) / dso[var].sum(coord) p = np.ones((len(q), nj)) * np.nan for i in range(len(q)): for j in range(nj): p[i, j] = np.interp(q[i], cdf[:, j], dso[coord]) return p # + fig, axs = plt.subplots(3, 1, figsize=(8, 8), squeeze=False) dso = dsets['trait_spc_resting'] for j, v in enumerate(['ATmax_dist', 'Ao_dist', 'Eo_dist']): ax = axs[j, 0] coord = dso[v].dims[0] ax.pcolormesh(dso[v].lat, dso[v][coord], dso[v], cmap=cmocean.cm.dense) # construct cumulative prob density function p = percentile(dso, v, coord, [0.25, 0.5, 0.75]) ax.plot(dso[v].lat, p[1, :], '-', color='tab:red') ax.plot(dso[v].lat, p[0, :], '--', color='tab:red') ax.plot(dso[v].lat, p[2, :], '--', color='tab:red') if j == 2: ax.set_xlabel(util.attrs_label(dso.lat)) ax.set_ylabel(util.attrs_label(dso[v])) ax.set_title(v) plt.suptitle('Resting metabolism', fontweight='bold') plt.tight_layout(); # + fig, axs = plt.subplots(3, 1, figsize=(8, 8), squeeze=False) dso = dsets['trait_spc_resting'] for j, v in enumerate(['ATmax_dist', 'Ao_dist', 'Eo_dist']): ax = axs[j, 0] coord = dso[v].dims[0] ax.pcolormesh(dso[v].lat, dso[v][coord], dso[v], cmap=cmocean.cm.dense) # construct cumulative prob density function p = percentile(dso, v, coord, [0.25, 0.5, 0.75]) ax.plot(dso[v].lat, p[1, :], '-', color='tab:red') ax.plot(dso[v].lat, p[0, :], '--', color='tab:red') ax.plot(dso[v].lat, p[2, :], '--', color='tab:red') if j == 2: ax.set_xlabel(util.attrs_label(dso.lat)) ax.set_ylabel(util.attrs_label(dso[v])) ax.set_title(v) plt.suptitle('Active metabolism', fontweight='bold') plt.tight_layout(); # + dso = dsets['trait_spc_resting'] ATmax_p_rest 
= percentile(dso, 'ATmax_dist', 'ATmax', [0.25, 0.5, 0.75]) dso = dsets['trait_spc_active'] ATmax_p_active = percentile(dso, 'ATmax_dist', 'ATmax', [0.25, 0.5, 0.75]) lat = dso.lat fig, ax = plt.subplots() ax.plot(lat, ATmax_p_rest[1, :], '-', label='AT$_{max}$ (resting)') ax.fill_between(lat, ATmax_p_rest[0, :], ATmax_p_rest[2, :], zorder=-100, alpha=0.5) ax.plot(lat, ATmax_p_active[1, :], '-', label='AT$_{max}$ (active)') ax.fill_between(lat, ATmax_p_active[0, :], ATmax_p_active[2, :], zorder=-100, alpha=0.5) ax.plot(ds.lat, ds.t_an.mean(['lon', 'depth']), '-', color='k') ax.set_ylabel('Temperature [°C]') ax.set_xlabel(util.attrs_label(dso.lat)) ax.legend(loc=(1.01, 0)); # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 看看均线策略。 # + import pandas as pd from datetime import datetime import trdb2py isStaticImg = False width = 960 height = 768 pd.options.display.max_columns = None pd.options.display.max_rows = None trdb2cfg = trdb2py.loadConfig('./trdb2.yaml') # - # 我们先指定一个特定的基金,特定的时间段来分析吧。 # + # 具体基金 # asset = 'jrj.510310' # baselineasset = 'jrj.510310' # asset = 'jqdata.000300_XSHG|1d' asset = 'jqdata.000036_XSHG|1d' # 起始时间,0表示从最开始算起 tsStart = 0 tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d')) # 结束时间,-1表示到现在为止 tsEnd = -1 tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d')) # 初始资金池 paramsinit = trdb2py.trading2_pb2.InitParams( money=10000, ) # 买入参数,用全部的钱来买入(也就是复利) paramsbuy = trdb2py.trading2_pb2.BuyParams( perHandMoney=1, depositMoney=10000, ) paramsbuy1 = trdb2py.trading2_pb2.BuyParams( perHandMoney=1, nextTimes=1, ) # 买入参数,用全部的钱来买入(也就是复利) paramsbuy2 = trdb2py.trading2_pb2.BuyParams( moneyParts=2, ) # 卖出参数,全部卖出 paramssell = trdb2py.trading2_pb2.SellParams( perVolume=1, ) # 卖出参数,全部卖出 paramssell7 = trdb2py.trading2_pb2.SellParams( # perVolume=1, keepTime=7 * 24 * 60 * 60, ) lststart = [1, 2, 3, 4, 5] lsttitle = ['周一', '周二', '周三', '周四', '周五'] # - # 首先看看这个基金的基准表现,就是在开始时间就直接买入,然后一直持有,看具体的收益率。 # + # baseline s0 = trdb2py.trading2_pb2.Strategy( name="normal", asset=trdb2py.str2asset(asset), ) buy0 = trdb2py.trading2_pb2.CtrlCondition( name='buyandhold', ) # paramsbuy = trdb2py.trading2_pb2.BuyParams( # perHandMoney=1, # ) # paramsinit = trdb2py.trading2_pb2.InitParams( # money=10000, # ) s0.buy.extend([buy0]) s0.paramsBuy.CopyFrom(paramsbuy) s0.paramsInit.CopyFrom(paramsinit) p0 = trdb2py.trading2_pb2.SimTradingParams( assets=[trdb2py.str2asset(asset)], startTs=tsStart, endTs=tsEnd, strategies=[s0], title='baseline', ) pnlBaseline = trdb2py.simTrading(trdb2cfg, p0) trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height) # - # 那么策略基准线大概就是这样了,7年多的时间2.2倍。 # + lstparams = [] buy0 = trdb2py.trading2_pb2.CtrlCondition( name='indicatorsv', operators=['<='], vals=[-0.015], strVals=['roc.{}'.format(1)], ) sell0 = trdb2py.trading2_pb2.CtrlCondition( name='indicatorsv', operators=['>='], vals=[0.02], strVals=['roc.{}'.format(1)], ) s0 = trdb2py.trading2_pb2.Strategy( name="normal", asset=trdb2py.str2asset(asset), ) s0.buy.extend([buy0]) s0.sell.extend([sell0]) s0.paramsBuy.CopyFrom(paramsbuy) s0.paramsSell.CopyFrom(paramssell) s0.paramsInit.CopyFrom(paramsinit) lstparams.append(trdb2py.trading2_pb2.SimTradingParams( assets=[trdb2py.str2asset(asset)], startTs=tsStart, endTs=tsEnd, strategies=[s0], title='roc[{}-{}]'.format(1.5, 2), )) lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, 
ignoreTotalReturn=2.3) trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height) # + lstparams = [] lstdown = [] lstup = [] os = -0.05 while True: lstdown.append(os) if os >= 0: break os += 0.001 os = 0.05 while True: lstup.append(os) if os <= 0: break os -= 0.001 for dobj in lstdown: for uobj in lstup: buy0 = trdb2py.trading2_pb2.CtrlCondition( name='indicatorsv', operators=['<='], vals=[dobj], strVals=['roc.{}'.format(1)], ) sell0 = trdb2py.trading2_pb2.CtrlCondition( name='indicatorsv', operators=['>='], vals=[uobj], strVals=['roc.{}'.format(1)], ) s0 = trdb2py.trading2_pb2.Strategy( name="normal", asset=trdb2py.str2asset(asset), ) s0.buy.extend([buy0]) s0.sell.extend([sell0]) s0.paramsBuy.CopyFrom(paramsbuy) s0.paramsSell.CopyFrom(paramssell) s0.paramsInit.CopyFrom(paramsinit) lstparams.append(trdb2py.trading2_pb2.SimTradingParams( assets=[trdb2py.str2asset(asset)], startTs=tsStart, endTs=tsEnd, strategies=[s0], title='roc[{:0.2f}-{:0.2f}]'.format(dobj * 100, uobj * 100), )) # len(lstparams) lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=3) trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height) # + dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline]) # dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2] dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # The Code is originally from towardsdatascience.com # + import skimage.data # Reading the image img = skimage.data.chelsea() # Converting the image into gray. (It means, we don't want to use 3 Channels-R,G,B- for make problem easier) img = skimage.color.rgb2gray(img) # + import numpy as np l1_filter = np.zeros((2,3,3)) # 2 : num of filters, 3x3 kernel size # - l1_filter[0, :, :] = np.array([[[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]]) l1_filter[1, :, :] = np.array([[[1, 1, 1], [0, 0, 0], [-1, -1, -1]]]) # + # Testing Code print(img.shape) print(len(img.shape)) print(img.shape[-1]) print(l1_filter.shape) # (num of filters, width of filter, height of filter) # - def conv(img, conv_filter): if len(img.shape) > 2 or len(conv_filter.shape) > 3: # Check if number of image channels matches the filter depth. if image.shape[-1] != conv_filter.shape[-1]: print("Error : Number of channels in both image and filter must match.") sys.exit() if conv_filter.shape[-1] != conv_filter.shape[2]: # Check if filter dimensions are equal. print("Error : Filter must be a square matrix. i.e. number of rows and columns must match.") sys.exit() if conv_filter.shape[1] % 2 == 0: # Check if filter dimensions are odd. print("Error : Filter must have an odd size. i.e. number of rows and columns must be odd.") sys.exit() # An empty feature map to hold the output of convolving the filter(s) with the image. 
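    # (added note) With no padding and stride 1 (a "valid" convolution), each spatial
    # dimension of the output is N - F + 1, where N is the image size and F the filter
    # size; e.g. a 100x100 image with a 3x3 filter yields a 98x98 feature map. The "+ 1"
    # in the shape below comes from this formula, not from a bias term.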
feature_maps = numpy.zeros((img.shape[0]-conv_filter.shape[1] + 1, img.shape[1]-conv_filter.shape[1] + 1, conv_filter.shape[0])) # Dimension of feature_maps : (Image Width(img.shape[0]) - Filter Width(conv_filter.shape[1]) + 1(Bias), Image Height(img.shape[1]) - Filter Height(conv_filter.shape[1] + 1(Bias), Num of channels(conv_filter.shape[0])) # conv_filter.shape[1] == conv_filter.shape[2] (3 == 3) # Convolving the image by the filter(s). for filter_num in range(conv_filter.shape[0]): # for loop within filters(shape[0] is 2 for now) print("Filter ", filter_num + 1) # since idx starts from 0, we add 1 curr_filter = conv_filter[filter_num, :] # getting a filter from the bank. """ Checking if there are multiple channels for the single filter. If so, then each channel will convolve the image. The result of all convolutions are summed to return a single feature map. """ if len(curr_filter.shape) > 2: # shape of curr_filter should be (3,3) so len will be 2(no depth) conv_map = conv_(img[:,:,0], curr_filter[:,:,0]) # Array holding the sum of all feature maps. for ch_num in range(1, curr_filter.shape[-1]): # Convolving each channel with the image and summing the results. conv_map = conv_map + conv_(img[:,:,ch_num], curr_filter[:,:,ch_num]) else: # There is just a single channel in the filter conv_map = conv_(img, curr_filter) feature_maps[:,:, filter_num] = conv_map # Holding feature map with the current filter return feature_maps # Returning all feature maps def conv_(img, conv_filter): # function for convolution operation filter_size = conv_filter.shape[1] # conv_filter : (num of filter, width, height) result = np.zeros((img.shape)) # make tuple size of img # Looping through the image to apply the convolution operation. for r in np.uint16(np.arange(filter_size/2.0, img.shape[0]-filter_size/2.0 + 1)): for c in np.uint16(np.arange(filter_size/2.0, img.shape[1]-filter_size/2.0 + 1)): """ Getting the current region to get multiplied with the filter. How to loop through the image and get the region based on the image and filter sizes is the most tricky part of convolution. """ curr_region = img[r - np.uint16(np.floor(filter_size/2.0)):r + np.uint16(np.ceil(filter_size/2.0)), c - np.uint16(np.floor(filter_size/2.0)):c + np.uint16(np.ceil(filter_size/2.0))] # Element-wise multiplication between the current region and the filter. curr_result = curr_region * conv_filter conv_sum = np.sum(curr_result) # Summing the result of multiplication. result[r, c] = conv_sum # Saving the summation in the convolution layer feature map. # Clipping the outliers of the result matrix. final_result = result[np.uint16(filter_size/2.0):result.shape[0] - np.uint16(filter_size/2.0), np.uint16(filter_size/2.0):result.shape[1] - np.uint16(filter_size/2.0)] return final_result # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sentiment Analysis end-to-end example # # This example is brought to you by Udacity - consider doing the great Udacity Deep Learning course. Find out more [here](https://www.udacity.com/course/deep-learning-nanodegree--nd101). 
#
# > These are my own personal notes
#
# ----
# In this notebook, the aim is to build `TODO`
#
# We begin by looking at the dataset we have:
# - reviews.txt: reviews of a movie
# - labels.txt: positive/negative label associated with the movie
#
# We will use the python `open()` function to open the file, with the parameter `'r'` to read the file. Using `readlines` will return a list made up of each line in the file, returned as a list item. Hence, each review (one per line) will be an item in the list.

# +
review_file = open('sentiment_data/reviews.txt', 'r')
reviews = list(map(lambda x: x[:-1], review_file.readlines()))
review_file.close()

label_file = open('sentiment_data/labels.txt', 'r')
labels = list(map(lambda x: x[:-1].upper(), label_file.readlines()))
label_file.close()
# -

# Now let's find out some information about our data.

print(f'Size of our data: {len(reviews)}')
print(f'No of labels: {len(labels)}')
print('\nNow, lets see one row of our data. First feature in our data:')
print(reviews[0])
print('\nPrediction:')
print(labels[0])

# ---
# ## Now, let's build up a hypothesis
#
# We will begin by looking at our data, and trying to see what conclusions we can draw. This is often called the `exploratory` phase. We will begin by looking at some random predictions...

def print_review_with_label(ith_row):
    print(labels[ith_row] + '\t:\t' + reviews[ith_row][:80] + '...')

# Using the function above, we can beautifully print our data; each feature along with its prediction.

print("labels.txt \t : \t reviews.txt\n")
print_review_with_label(2137)
print_review_with_label(12816)
print_review_with_label(6267)
print_review_with_label(21934)

# We will be using the `Counter` python class throughout this section, as it provides a nice way to count the occurrences of words.

from collections import Counter
import numpy as np

# +
positive_words_counter = Counter()
negative_words_counter = Counter()
total_words_counter = Counter()

example_counter_with_stuff = Counter([1, 2, 3, 4, 4, 4])

def counter_pretty_print():
    print('positive counter: ', positive_words_counter)
    print('negative counter: ', negative_words_counter)
    print('total words counter: ', total_words_counter)

print('At this stage, our counters are empty...')
counter_pretty_print()
print('Here is a test counter: ', example_counter_with_stuff)
# -

# Now, let's fill out our three counters.

# + tags=[]
# for each row in our dataset
for sentence_no in range(len(reviews)):
    # for each word in our sentence
    for word in reviews[sentence_no].split(' '):
        # if it is positive - add to the positive counter
        if labels[sentence_no] == 'POSITIVE':
            positive_words_counter[word] += 1
        # if it is negative - add to the negative counter
        if labels[sentence_no] == 'NEGATIVE':
            negative_words_counter[word] += 1
        # regardless, add to the total word counter
        total_words_counter[word] += 1

# + tags=[]
# let's take a look at the most common words.
print('Most common positive words:\n')
positive_words_counter.most_common()
# -

# let's take a look at the most common words.
print('\nMost common negative words:\n')
negative_words_counter.most_common()

# Instead of looking at the counts of the words, let's now look at the ratios between words. Looking at how often words occur, either positive or negative, does not really give us what we are looking for: for example, you can see there are a lot of common words between both the positive and negative counters. Instead, by looking at a ratio, we will be looking at the words that are found in positive reviews over negative, and vice versa.
# # This will basically tell us how many more times a word is seen in positive reviews than in the negatives. e.g. we can imagine that positive reviews use the word "love" more, hence the ratio should be larger. Hence: # - Positive words will have a large ratio - bigger than 1 # - Negative words will have a smaller ratio - less than 1 # - words that are neither positive or negative, but neutral, will be centered around 0 # + tags=[] positive_to_negative_ratio = Counter() for word, count, in list(total_words_counter.most_common()): if count > 100: positive_to_negative_ratio[word] = positive_words_counter[word] / (negative_words_counter[word] + 1) # +1 so we dont divide by 0 # - # Now, lets take a look at some words... print(f'positive:negative ratio for the word and: {round(positive_to_negative_ratio["and"],2)}') print(f'positive:negative ratio for the word good: {round(positive_to_negative_ratio["best"],2)}') print(f'positive:negative ratio for the word bad: {round(positive_to_negative_ratio["bad"],2)}') # Okay, but is a score of 2 twice as good as other scores? With the ratios as they are now, it will be difficult to actually compare the scores. So instead, we will do what every computer scientists loves to do, which is to log the numbers. # # To find out more about why computer scientists love log, feel free to watch the series by on Machine Learning [here](https://www.youtube.com/watch?v=MrLPzBxG95I&list=PLl8OlHZGYOQ7bkVbuRthEsaLr7bONzbXS) for word, count in positive_to_negative_ratio.most_common(): positive_to_negative_ratio[word] = np.log(count) # Now, lets take a look at the log(words)... print(f'positive:negative ratio for the word and: {round(positive_to_negative_ratio["and"],2)}') print(f'positive:negative ratio for the word good: {round(positive_to_negative_ratio["best"],2)}') print(f'positive:negative ratio for the word bad: {round(positive_to_negative_ratio["bad"],2)}') # You can see now that: # - positive words are close to +1 # - negative words are close to -1 # - neutral words are centered around 0 # # Now, to close our hypothesis section where we wanted to draw a hypothesis from the data, we will take a peek at our ratio data. positive_to_negative_ratio.most_common()[0:20] # As we expected, some positive words like `flawless` and `perfection` have high scores....but also `lincoln`. Interesting. list(reversed(positive_to_negative_ratio.most_common()))[0:20] # There are some funny negative words, including `lousy` and `unwatcheable`. But again, some interesting words like `prom`. # ## Transforming words into numbers # # we now need to prerpare our words so that we can feed them into our neural network. In order to do that, we want to transform them so we can do the maths of neural networks. # # What we want to do for our network, is build a dictionary. With this dictionary, we will count each word in our input review, and feed that into the network. # # As we have already built a Count object that has every word possible from our training data, we are able to now compare each single review from our dataset, and see how often each word occurs per review. This will allow us to feed our reviews into the network whilst maintaining consistency between inputs. # # We will begin by building a `vocab`, a set that contains all the words. vocab = set(total_words_counter.keys()) # Vocab is s Set, similar to the mathematical set. This means that it only has each word appearing only once. # # Now, lets take a look at how our Neural network will look. 
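# Before looking at the network diagram, here is a minimal sketch (using a tiny toy vocabulary rather than our real `vocab`, so purely illustrative) of how a review becomes a fixed-length count vector - this is exactly the representation the input layer will hold.

# +
# toy example only: a hypothetical three-word vocabulary
toy_vocab = ['good', 'bad', 'movie']
toy_review = 'good good movie'

toy_vector = np.zeros(len(toy_vocab))
for toy_word in toy_review.split(' '):
    if toy_word in toy_vocab:
        toy_vector[toy_vocab.index(toy_word)] += 1

print(toy_vector)  # [2. 0. 1.] - counts of 'good', 'bad', 'movie'
# -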
# # ![image of our neural network](sentiment_network.png) # # You can see that our NN will have: # - one input layer: # - This will be the Vocab # - we will represent this as a np array # - one hidden layer # - one output layer that has one output neuron layer_0 = np.zeros(shape=(1,len(vocab))) # lets take a look at the first layer... layer_0.shape # This first layer now has a neuron/input per word from our vocab. With the input being a count of how many times the word occurs in the review. However, to pass words from a review into this first layer, we need to be able to build a way that will allow us to feed a new review in with the words organised the same way as the first layer in our network. # + word_to_index_translator = {} # lets map each word in our vocab to an index, and capture that as a dictionary for index, word in enumerate(vocab): word_to_index_translator[word] = index # lets temporarily use a Counter object to look at the first few rows in our dictionary Counter(word_to_index_translator).most_common(5) # - # now, lets build a function that can take a new review, and spit out a vector that matches the input layer. def input_for_input_layer(review): ''' New input layer, layer_0, for our network to train on. layer_0 represents how many times a word occurs in a review. Args: review (str) : a review for a movie Returns: None ''' global layer_0 # clear out previous layer 0 layer_0 *=0 for word in review.split(' '): # find index location of the word from our vocab index_of_word = word_to_index_translator[word] # add it to our layer 0 layer_0[:, index_of_word] += 1 # Lets test this by feeding it a review. # Before we test it, lets look at layer_0 layer_0 input_for_input_layer(reviews[200]) layer_0 # Great, it has updated layer_0. # # Now, we will build a function that can take a label (e.g. POSITIVE or NEGATIVE), and return either 1 or 0. This is needed as our network needs to be built ontop of numbers, and not strings. def translate_label(label): '''Converts label to 0 or 1. Args: label (str) : POSITIVE or NEGATIVE label for a review RETURNS: 0 : if negative 1 : if positive ''' if label == 'POSITIVE': return 1 else: return 0 # again, lets test this by running a label into our function. print(f'testing +ve label: {labels[200]}') print(f'This is the output from our function: {translate_label(labels[200])}') print(f'\ntesting -ve label: {labels[1]}') print(f'This is the output from our function: {translate_label(labels[1])}') # Great, so it works. # # Now it is finally time to build our Neural Network! 
# # We will: # - build a basic neural network that has an input layer, hidden layer and an output layer # - we will not be adding non-linearity in our hidden layer # - we will use the same functions we defined above, to build up our training data set # - we will create a vocab from our training data # - we will train over the entire corpus import sentiment_network import importlib importlib.reload(sentiment_network) mlp = sentiment_network.SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1) # + tags=[] mlp.test(reviews[-1000:],labels[-1000:]) # + tags=[] mlp.train(reviews[:-1000],labels[:-1000]) # - mlp2 = sentiment_network.SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001) mlp2.train(reviews[:-1000],labels[:-1000]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') plt.rc('figure', figsize=(10, 6)) googc = pd.read_csv('./GOOG.csv') googc googc.info() googc.sort_values(by=['date']) googc googc = googc.sort_values(by=['date']) googc axes = plt.subplot(1, 1, 1) axes.set_xticks([]) axes.plot(googc['date'], googc['open']) # + googc['date']=pd.to_datetime(googc['date'],format='%Y-%m-%d') googc.index = googc['date'] month = googc.groupby(pd.Grouper(freq='M')) monthDF = month.mean() monthDF # - axes = plt.subplot(1, 1, 1) axes.set_xticks([]) axes.plot(monthDF.index, monthDF['open']) axes = plt.subplot(1, 1, 1) axes.set_xticks([]) axes.scatter(monthDF.index, monthDF['open']) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="CT5xFw9HQynC" # ##**To find the inverse of matrix, the matrix is passed to the linalg.inv() method of the Numpy module:**## # + colab={"base_uri": "https://localhost:8080/"} id="215nIZM3O_O9" outputId="54ab6cc1-9f37-48b9-8564-c34022a23f8d" import numpy as np A = np.array([[4,3],[-5,9]]) #creation of matrix A print(A) inv_A = np.linalg.inv(A) print(inv_A) # + colab={"base_uri": "https://localhost:8080/"} id="sRfgLyWrPBzl" outputId="abc9a607-32b5-4a90-cf82-789c668c51db" B = np.array([[20],[26]]) print(B) # + colab={"base_uri": "https://localhost:8080/"} id="LRSFXD6NPEXN" outputId="845e4656-4bd4-4f15-da55-d10fab940d2c" X = np.linalg.inv(A).dot(B) print(X) # + colab={"base_uri": "https://localhost:8080/"} id="PzdyaPS0PGRe" outputId="30023eac-2bd7-470e-e34d-47c869b2a541" X = np.dot(inv_A,B) print(X) # + [markdown] id="nhwkSYqTPJYl" # ##**ACTIVITY (Let's now solve a system of three linear equations:)**## # + colab={"base_uri": "https://localhost:8080/"} id="NlQJbyVUPItd" outputId="7e76001e-0989-4825-a9c9-7e7853337264" import numpy as np A = np.array([[4,3,2],[-2,2,3],[3,-5,2]]) #creation of matrix A print(A) print() inv_A = np.linalg.inv(A) print(inv_A) # + colab={"base_uri": "https://localhost:8080/"} id="b50nCNEIPPWu" outputId="2d70f171-633d-4f48-cf8b-b3cc91115c30" import numpy as np B= np.array([[25],[-10],[-4]]) print(B) # + colab={"base_uri": "https://localhost:8080/"} id="hUQ2OzAZPR2G" outputId="66109754-c210-41a6-8366-1d78954c38dd" X = np.linalg.inv(A).dot(B) print(X) # + colab={"base_uri": "https://localhost:8080/"} 
id="wMJH-tORPUWy" outputId="2eaee757-ea93-4236-a596-8eab3685bc54" X = np.dot(inv_A,B) print(X) # + [markdown] id="gj_GmWwgPW8P" # ##**APPLICATION**## # + colab={"base_uri": "https://localhost:8080/"} id="fmCsH-drPZ5Y" outputId="394dae46-37fa-4f04-dcff-a5894efadd5d" import numpy as np A = np.array([[20,10],[17,22]]) #creation of matrix A print(A) print() inv_A = np.linalg.inv(A) print(inv_A) # + colab={"base_uri": "https://localhost:8080/"} id="-NWsMSy8PskA" outputId="26b1914a-2188-4e99-d1ea-cd3f68fe7ac1" import numpy as np B= np.array([[350],[500]]) #creation of matrix B print(B) # + colab={"base_uri": "https://localhost:8080/"} id="EupK7DI-P7eM" outputId="c5bc6f15-7b2d-4543-b0fa-e451635ce179" X = np.linalg.inv(A).dot(B) print(X) # + colab={"base_uri": "https://localhost:8080/"} id="-GkUMpwSP4Es" outputId="7346c729-8e0c-4293-d199-88fbbe81ea07" X = np.dot(inv_A,B) print(X) # + [markdown] id="FXHQiIehRNRT" # ##**To check if the answer is correct**## # + colab={"base_uri": "https://localhost:8080/"} id="lOGsuuzyRMuw" outputId="6f691aa6-67bc-4691-c012-5661842def41" B = np.dot(A,X) print(B) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import pandas as pd import numpy as np from matplotlib import pyplot as plt # %matplotlib inline df = pd.read_csv("data/cars.csv", delimiter=";") df[-5:] # - y = np.matrix(df.Car[1:]).T x = np.matrix(df.Car[:-1]).T # displaying X, Y pairs np.hstack([x[-4:], y[-4:]]) # + def linreg_via_np(X, Y, **kwargs): theta, e, r, s = np.linalg.lstsq(X, Y) return theta def linreg_via_syseq(X, Y, **kwargs): theta = (X.T * X).I * X.T * Y return theta # + def prepareX(X, order=1, **kwargs): return np.hstack([ np.power(X, i) for i in range(order + 1) ]) def test(X, Y, trainer, **kwargs): X = prepareX(X, **kwargs) # plotting the original data plt.plot(X[:,1], Y, "rx") model = trainer(X, Y, **kwargs) # plotting the model xTest = np.linspace( int(X[:,1][0] - 1000), int(X[:,1][-1] + 2000), int(X[:,1][-1] - X[:,1][0]) * 2) xTest = prepareX(np.matrix(xTest).T, **kwargs) plt.plot(xTest[:,1], xTest * model) plt.title(kwargs.get("title", "")) plt.show() test(X, Y, linreg_via_np, order=1, title="linreg_via_np order 1") test(X, Y, linreg_via_syseq, order=1, title="linreg_via_syseq order 1") test(X, Y, linreg_via_np, order=3, title="linreg_via_np order 3") test(X, Y, linreg_via_syseq, order=3, title="linreg_via_syseq order 3") # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import pickle from deap import base, creator, tools, algorithms import matplotlib.pyplot as plt import numpy as np creator.create("obj", base.Fitness, weights=(1.0,)) creator.create("Ind", list, fitness=creator.obj) experiments = 40 checkpoint_files = {} for i in range(experiments): with open("coarse_search_"+str(i)+"/checkpoint.pkl", "rb") as cp_file: checkpoint_files[i] = pickle.load(cp_file) def get_ind_list(pop_list, index): ind_list = [] for pop in pop_list: ind_list.append(pop[index]) return ind_list keff_results = {"max":[], "ave":[], "min":[]} for i in range(experiments): logbook = checkpoint_files[i]["logbook"] keff_results["max"].append(get_ind_list(logbook.chapters["oup"].select("max"), 0)[-1]) 
keff_results["ave"].append(get_ind_list(logbook.chapters["oup"].select("avg"), 0)[-1]) keff_results["min"].append(get_ind_list(logbook.chapters["oup"].select("min"), 0)[-1]) df = pd.read_csv("coarse_search_hyperparameters.csv", index_col="experiment_num") df["keff_max"] = keff_results["max"] df["keff_min"] = keff_results["min"] df["keff_ave"] = keff_results["ave"] df df[df["keff_ave"] > 1.39820] def get_ind_list(pop_list, index): ind_list = [] for pop in pop_list: ind_list.append(pop[index]) return ind_list T_pitch = 0.09266 vol_total = 23.1 * 2.55 * T_pitch * 20 vol_triso = 4 / 3 * np.pi * 4235e-5 ** 3 no_trisos = 0.0979 * vol_total / vol_triso vol_slice = 2.31 * 2.55 * T_pitch * 20 plt.rc('font', family='serif') fig, ax = plt.subplots(figsize=(15,7)) colors = 2*["tab:blue", "tab:orange", "tab:green", "tab:red", "tab:purple", "tab:brown", "tab:pink", "tab:gray", "tab:olive", "tab:cyan", "tab:blue","tab:orange", "tab:green", "tab:red", "tab:purple", "tab:brown", "tab:pink", "tab:gray", "tab:olive", "tab:cyan"] boundaries = np.arange(2,27.1,2.31) midpoints = [] for x in range(len(boundaries)-1): midpoints.append((boundaries[x]+boundaries[x+1])/2) midpoints = np.array(midpoints) x = midpoints c = 0 for i in range(experiments): if i in list(np.argsort(df["keff_ave"]))[35:]: keff_max = checkpoint_files[i]["all"]["outputs"][-1] max_value = max(keff_max, key = lambda t: t[0]) max_index = keff_max.index(max_value) final_pop = checkpoint_files[i]["all"]["populations"][-1] for j, ind in enumerate(final_pop): if j == max_index: print(i, ind) sine = ind[0] * np.sin(ind[1]*x + ind[2]) + 2 sine = sine / sum(sine) * no_trisos * vol_triso / vol_slice ax.plot(midpoints, sine, marker='*', color=colors[c],label="exp "+str(i) + r", $k_{eff\ max}$=" + "{:.5f}".format(max_value[0])) c += 1 #ax.text(midpoints[-1]+0.7, sine[-1], "%f" %max_value, ha="center") #ax.text(midpoints[-1]+2, sine[-1], "%d" %i, ha="center") ax.grid() ax.set_xlabel("x [cm]", fontsize=18) ax.set_ylabel(" Packing Fraction ", fontsize=18) handles, labels = ax.get_legend_handles_labels() ax.legend(handles, labels, fontsize=16) fig.savefig('topfive_plot.png',bbox_inches='tight', dpi=300) df def comparison(x,y,ax): fine = 25 size = (df["keff_ave"]- np.min(df["keff_ave"]))#**4*2e9 size = (df["keff_ave"]) #im = ax.scatter(df[x][:fine], df[y][:fine], c=size[:fine],alpha=.7, cmap='viridis')#s=size[:fine],edgecolors='k',alpha=.7) #im = ax.scatter(df[x][fine:], df[y][fine:], c=size[fine:],alpha=1, cmap='viridis')#s=size[fine:],edgecolors='k',alpha=.7) #img = ax.scatter(df[x][:fine], df[y][:fine], c=size[fine:],alpha=1, cmap='viridis') img = ax.scatter(df[x][:fine], df[y][:fine], c=size[:fine],alpha=0.6, cmap='cividis', s=90,edgecolors='k') if x == "select_op": ax.set_xticklabels(["selTournament", "selNSGA2", "selBest"]) ax.grid() #ax.set_facecolor('lightgrey') return img fig = plt.figure(figsize=(28, 28)) ax = fig.add_subplot(8.5,7,1) img = comparison("select_op", "tournsize", ax) fig.colorbar(img) hyperparameters = ["pop_size", "mut_prob", "mate_prob", "select_op", "k", "tournsize", "mate_op"] names = ["population size", "mutation probability", "mating probability", "selection operator", "selection individuals", "tournament size", "mating operator"] plt.rc('font', family='serif') fig = plt.figure(figsize=(28, 28)) num = 0 for i, p in enumerate(hyperparameters): for j, q in enumerate(hyperparameters): if i > 0: num += 1 if j < i: ax = fig.add_subplot(7,7,num) img = comparison(q,p,ax) if j == 0: ax.set_ylabel(names[i],fontsize=16) if i == 
len(hyperparameters) - 1 and j < len(hyperparameters) - 1: ax.set_xlabel(names[j], fontsize=16) cbar_ax = fig.add_axes([0.8, 0.3, 0.02, 0.55]) cbar = fig.colorbar(img, cax=cbar_ax) cbar.ax.set_ylabel('Average $k_{eff}$', rotation=270, fontsize=18) cbar.ax.get_yaxis().labelpad = 20 fig.subplots_adjust(wspace=.6) fig.savefig('hyperparameter_sens.png',bbox_inches='tight', dpi=300) values = {} values["a"], values["b"], values["c"], values["keff_max"], values["keff_ave"] = [], [], [], [], [] for j in range(experiments): results = checkpoint_files[j]["all"] keffs = results["outputs"][-1] pop = results["populations"][-1] max_value = max(keffs, key = lambda t: t[0]) max_index = keffs.index(max_value) values["a"].append(pop[max_index][0]) values["b"].append(pop[max_index][1]) values["c"].append(pop[max_index][2]) values["keff_max"].append(max_value) values["keff_ave"].append(np.mean(keffs)) def comparison2(x,y,ax): fine = 25 ax.scatter(df[x][:fine], values[y][:fine], c=df["keff_ave"][:fine],alpha=0.5, cmap='cividis', s=90,edgecolors='k') ax.scatter(df[x][fine:], values[y][fine:], c=df["keff_ave"][fine:], alpha=1., cmap='cividis', s=90,edgecolors='k') ax.grid() #ax.set_facecolor('lightgrey') return img hyperparameters = ["pop_size", "mut_prob", "mate_prob", "select_op", "k", "tournsize", "mate_op"] names = ["population size", "mutation probability", "mating probability", "selection operator", "selection individuals", "tournament size", "mating operator"] inputs = ["a", "b", "c", "keff_max", "keff_ave"] fig = plt.figure(figsize=(28, 28)) num = 0 for i, p in enumerate(inputs): for j, q in enumerate(hyperparameters): num += 1 ax = fig.add_subplot(7,7,num) img = comparison2(q,p,ax) if j == 0: ax.set_ylabel(p,fontsize=18) if i == len(inputs) - 1: ax.set_xlabel(names[j], fontsize=18) cbar_ax = fig.add_axes([0.93, 0.4, 0.02, 0.45]) cbar = fig.colorbar(img, cax=cbar_ax) cbar.ax.set_ylabel('Average $k_{eff}$', rotation=270, fontsize=18) cbar.ax.get_yaxis().labelpad = 20 fig.savefig('input_hyperparameters_sens.png',bbox_inches='tight', dpi=300) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Movie Recommendation # + pycharm={"is_executing": false} import pandas as pd import matplotlib.pyplot as plt import numpy as np from IPython.display import display # + pycharm={"is_executing": false, "name": "#%%\n"} movies = pd.read_csv("movies.csv", sep = ",", usecols=range(2)) ratings = pd.read_csv("ratings_train.csv", sep = ",", usecols=range(3)) ratings['rating'] = ratings['rating'].astype(float) ratings['userId'] = ratings['userId'].astype(int) # K-Fold Cross Validation k_fold = 10 knn_k_values = [1, 5, 9, 13, 17] # Shuffle the data for k-fold cross validation train = ratings.sample(frac = 1) number_of_rows = len(train.index) # + pycharm={"is_executing": false, "name": "#%% \n"} # Boolean function to check whether a user has watched a movie or not def has_watched(user, movie, user_movie): try: rank = user_movie.at[user,movie] except KeyError: return False if rank == 0: return False # because filled NaNs with zeros else: return True # + pycharm={"is_executing": false, "name": "#%%\n"} def similarity_cosine(u, v): return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)) # + pycharm={"is_executing": false, "name": "#%%\n"} # Find most similar k users to "user", who also watched "movie" def also_watched_similar_users(user, movie, k, 
user_movie, rank_table): return_list = list() similar_users = rank_table.loc[user].values # Get similar users list of the user j = 0 # counter for k value for i in similar_users: if has_watched(i, movie, user_movie): # If neighbor has also watched movie return_list.append(i) j = j + 1 if j == k: break return return_list # - # Below, *get_mean_rating_from_user_list* function takes the user list which returned by previous *also_watched_similar_users* # function. This function basically calculates the both weighted and unweighted error rate of each user on the list # and finally calculates the mean error rate of both. # + pycharm={"is_executing": false, "name": "#%%\n"} # Find mean rating of a movie from the users from user_list def get_mean_rating_from_user_list(user_list, user, movie, user_movie, sorted_similars): means = list() if not user_list: # If user_list is empty return -1 total_rating = 0 total_rating_weighted = 0 total_similarity = 0 for n in user_list: rate = user_movie.at[n, movie] total_rating += rate similarity = sorted_similars.at[user, n] total_similarity += similarity weighted_rate = rate * similarity total_rating_weighted += weighted_rate unweighted_mean = total_rating / len(user_list) weighted_mean = total_rating_weighted / total_similarity means.extend((unweighted_mean, weighted_mean)) return means # - # Below, *validation_test* function calls the other functions and gets the returned predicted rating values. Afterwards, calculates # the difference between the real rating and predicted rating for both weighted and unweighted KNN and returns these values. # + pycharm={"is_executing": false, "name": "#%%\n"} def validation_test(sorted_similarity, test_data, k, user_movie, rank_table): errors = list() ratings_to_check = test_data["rating"] test_data_without_rating = test_data.drop(columns = ["rating"]) without_rating = test_data_without_rating.to_numpy() ratings_to_check.reset_index(drop = True, inplace = True) total_difference_weighted = 0 total_difference_unweighted = 0 for m in range(len(without_rating)): user, movie, rating = without_rating[m][0], without_rating[m][1], ratings_to_check[m] predicted_values = get_mean_rating_from_user_list( also_watched_similar_users(user, movie, k, user_movie, rank_table), user, movie, user_movie, sorted_similarity) if predicted_values == -1 : continue # if nobody watched the movie in training dataset difference_unweighted = abs(rating - predicted_values[0]) total_difference_unweighted += difference_unweighted difference_weighted = abs(rating - predicted_values[1]) total_difference_weighted += difference_weighted error_unweighted = total_difference_unweighted / len(without_rating) error_weighted = total_difference_weighted / len(without_rating) errors.extend((error_unweighted, error_weighted)) return errors # - # Below for loop is the main operator of the program. This loop splits the data to training and test parts according to # K-Fold method. After that it trains the data and creates corresponding numpy arrays and pandas data frames. After getting the # returned mean error rates from *validation_test* function, for each K-fold, calculates the mean error rates for kNN's k values. 
# # **Tables that are created in training process can be seen in outputs.** # + pycharm={"is_executing": false, "name": "#%%\n"} # list to plot K with matplotlib y1 = list() y2 = list() count = 0 for K in knn_k_values: total_error_unweighted = 0 total_error_weighted = 0 for i in range(k_fold): if i == 0: test_of_validation = train.iloc[(number_of_rows // k_fold) * i : (number_of_rows // k_fold) * (i + 1)] train_of_validation = train.iloc[(number_of_rows // k_fold) * (i + 1) : (number_of_rows // k_fold) * k_fold] else: train_of_validation_first = train.iloc[ : (number_of_rows // k_fold) * i] test_of_validation = train.iloc[(number_of_rows // k_fold) * i : (number_of_rows // k_fold) * (i + 1)] train_of_validation_second = train.iloc[(number_of_rows // k_fold) * (i + 1) : (number_of_rows // k_fold) * k_fold] # Concatenate the two training parts of validation set train_of_validation = pd.concat([train_of_validation_first, train_of_validation_second]) test_of_validation = test_of_validation.drop(test_of_validation[test_of_validation.userId == 53].index) user_movie_matrix = pd.pivot_table(train_of_validation, values = "rating", index = "userId", columns = "movieId") if count == 0 : display(user_movie_matrix.head()) # Find each user's mean rating among all movies Mean = train_of_validation.groupby(by = "userId",as_index = False)['rating'].mean() Mean.head() rating_avg = pd.merge(train_of_validation,Mean,on = 'userId') rating_avg.rename(columns = {'rating_y':'user_mean', 'rating_x':'rating'}, inplace = True) # Normalize all ratings according to zero rating_avg['normal_rating'] = rating_avg['rating'] - rating_avg['user_mean'] if count == 0 : display(rating_avg.head()) # Fill NaN values with zero user_movie_normal = pd.pivot_table(rating_avg,values = 'normal_rating',index = 'userId',columns = 'movieId') user_movie_normal.fillna(0, inplace = True) user_movie_normal = user_movie_normal.drop([53]) user_movie_matrix.fillna(0, inplace = True) final_numpy = user_movie_normal.to_numpy() if count == 0 : display(user_movie_normal.head()) # Calculate cosine similarity between each user and store in user_user_similarity data frame np.seterr(invalid='ignore') total = 0 user_user = np.ones((len(final_numpy), len(final_numpy))) for i in range(len(final_numpy)): for j in range(len(final_numpy)): user_user[i][j] = similarity_cosine(final_numpy[i],final_numpy[j]) + 1 # Dataset contains the similarity between each user user_user_similarity = pd.DataFrame(data = user_user,index = user_movie_normal.index, columns = user_movie_normal.index) if count == 0 : display(user_user_similarity.head()) # Create new data frame with sorted user-user similarities a = user_user_similarity.values a = np.nan_to_num(a) c = np.copy(a) a.sort(axis=1) a = a[:, ::-1] # ascending user_user_sorted_similarity = pd.DataFrame(a, user_user_similarity.index, user_user_similarity.columns) user_user_similarity = pd.DataFrame(c, user_movie_normal.index, user_movie_normal.index) user_rank_table = user_user_similarity.apply( lambda x: pd.Series(x.sort_values(ascending = False).iloc[1:len(user_user_similarity)].index, index = ['top{}'.format(i) for i in range(2, len(user_user_similarity)+1)]), axis = 1) if count == 0 : display(user_rank_table.head()) error_rates = validation_test(user_user_sorted_similarity, test_of_validation, K, user_movie_matrix, user_rank_table) error_rate_unweighted = error_rates[0] total_error_unweighted += error_rate_unweighted error_rate_weighted = error_rates[1] total_error_weighted += error_rate_weighted count += 1 print("k 
Value: " + str(K) + " Unweighted Mean Error Rate: " + str(total_error_unweighted / k_fold) + " Weighted Mean Error Rate: " + str(total_error_weighted / k_fold)) y1.append(total_error_unweighted / k_fold) y2.append(total_error_weighted / k_fold) # + pycharm={"is_executing": false, "name": "#%%\n"} # Plot with matplotlib line1 = plt.plot(knn_k_values, y1, label='Unweighted') line2 = plt.plot(knn_k_values, y2, label='Weighted') plt.xlabel('k Value') plt.ylabel('Mean Error Rates') plt.legend() # + [markdown] pycharm={"name": "#%% md\n"} # As it seen from the graph, when k is very small(<4), we have a very high error rate according to other k values. When we choose a k value bigger than 5, # error rate is very acceptable. As k value increases error rate is going down. However, because of the memory and time consumption, bigger k values are # not ideal. For this dataset example, a k value between 6-10 looks as best choice. # # On the other hand, because we normalized all rating values between 0 - 2, user similarities are very close to each other. As a result of that, weighted k-NN # gives us very similar results to unweighted k-NN. # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import networkx as nx from glob import glob import pdb import re import seaborn as sns; sns.set() import matplotlib.pyplot as plt from matplotlib import gridspec import pandas as pd; pd.options.display.float_format = '{:,.2f}'.format import statsmodels.stats.api as sm import warnings; warnings.filterwarnings("ignore", category=UserWarning) from statistics import median_low # + import sys import os sys.path.extend(['./..']) # have to add the project path manually to the Python path os.chdir('./..') # - pwd from src.utils import load_pickle from src.Tree import TreeNode # %matplotlib inline # ## Aggregate all the stats & make the plot # + #graphs = ['3-comm', 'BA-1000-3', 'BA-100-3', 'clique-ring-100-4', 'clique-ring-25-4', 'clique-ring-50-4', 'dolphins', 'eucore', 'flights', 'football', 'grid-100-100', 'grid-10-10', 'karate', 'ladder-10', 'ladder-100', 'ladder-20', 'la dder-4', 'ladder-50', 'ladder-500', 'ring-10', 'ring-100', 'ring-1000', 'ring-20', 'ring-500'] graphs = ['eucore', 'clique-ring-500-4'] #models_eucore = ['BTER', 'BUGGE', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GCN_AE', 'GCN_VAE', 'HRG', 'Kronecker', 'NetGAN', 'SBM'] models_eucore_reduced = ['Linear_AE'] #models_3_comm = ['BTER', 'Chung-Lu', 'CNRG', 'Deep_GCN_AE', 'Deep_GCN_VAE', 'Erdos-Renyi', 'GCN_AE', 'GCN_VAE', 'GraphVAE', 'HRG', 'Linear_AE', 'Linear_VAE', 'NetGAN' 'SBM'] #models_clique_ring_25_4 = ['BTER', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GraphAE', 'GraphVAE', 'HRG', 'NetGAN', 'SBM'] #models_clique_ring_100_4 = ['BTER', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GraphAE', 'HRG', 'SBM'] #models_dolphins = ['BTER', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GraphAE', 'GraphVAE', 'HRG', 'SBM'] #models_karate = ['BTER', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GraphAE', 'GraphVAE', 'HRG', 'Kronecker', 'NetGAN'] #models_ladder_50 = ['BTER', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'GraphAE', 'GraphVAE', 'HRG', 'SBM'] # - base_path = '/home/danielgonzalez/repos/infinity-mirror/output/pickles' models = models_eucore_reduced graph = 'eucore' sel = 'fast' def get_stats_from_root(graph, model, sel, root, cols, trial_id): for tnode in root.descendants: row = {} row['graph'] = graph row['type'] = 'absolute' 
row['orig_n'] = root.graph.order() row['orig_m'] = root.graph.size() row['orig_graph_obj'] = root.graph row['model'] = model row['sel'] = sel row['trial_id'] = trial_id row['gen_id'] = tnode.depth row['gen_n'] = tnode.graph.order() row['gen_m'] = tnode.graph.size() row['gen_graph_obj'] = tnode.graph # use the stats compared with the original seed stats = tnode.stats assert set(cols[-8: ]) == set(stats.keys()), f'tnode: {stats.keys()} doesnt have all the reqd stats' for key, val in stats.items(): row[key] = val assert len(row.keys()) == len(cols), \ f'Improper number of cols in row: {len(row)}: expected {len(cols)} {stats.keys()}' yield row for tnode in root.descendants: row = {} row['graph'] = graph row['type'] = 'sequential' row['orig_n'] = root.graph.order() row['orig_m'] = root.graph.size() row['orig_graph_obj'] = root.graph row['model'] = model row['sel'] = sel row['trial_id'] = trial_id row['gen_id'] = tnode.depth row['gen_n'] = tnode.graph.order() row['gen_m'] = tnode.graph.size() row['gen_graph_obj'] = tnode.graph # use the stats compared with the previous graph stats = tnode.stats_seq assert set(cols[-8: ]) == set(stats.keys()), f'tnode: {stats.keys()} doesnt have all the reqd stats' for key, val in stats.items(): row[key] = val assert len(row.keys()) == len(cols), \ f'Improper number of cols in row: {len(row)}: expected {len(cols)} {stats.keys()}' yield row # + cols = ['graph', 'type', 'orig_n', 'orig_m', 'orig_graph_obj', 'model', 'sel', 'trial_id', 'gen_id', 'gen_n', 'gen_m', 'gen_graph_obj', 'deltacon0', 'lambda_dist', 'degree_cvm', 'pagerank_cvm', 'pgd_pearson', 'pgd_spearman', 'node_diff', 'edge_diff'] data = {col: [] for col in cols} for model in models: path = os.path.join(base_path, graph, model) for filename in os.listdir(path): if filename[5:7:1] == '20': trial_id = filename[8:10:1] try: trial_id = int(trial_id) except ValueError: trial_id = int(trial_id[:-1]) root = load_pickle(os.path.join(path, filename)) for row in get_stats_from_root(graph=graph, model=model, sel=sel, root=root, cols=cols, trial_id=trial_id): for col, val in row.items(): data[col].append(val) df = pd.DataFrame(data) # - df #df[(df.graph=='clique-ring-25-4') & (df.gen_id==5)].shape plt.rcParams['figure.figsize'] = [40, 20] def group_plot(df, graph_name): graph = df.graph.unique()[0] metrics = ['node_diff', 'edge_diff', 'lambda_dist', 'deltacon0', 'degree_cvm']#, 'pgd_spearman'] models = df.model.unique() rows = len(metrics) cols = len(models) n_d_min = min(df[df.model==model].node_diff.min() for model in models) - 1 n_d_max = max(df[df.model==model].node_diff.max() for model in models) + 5 e_d_min = min(df[df.model==model].edge_diff.min() for model in models) - 1 e_d_max = max(df[df.model==model].edge_diff.max() for model in models) + 5 l_d_min = min(df[df.model==model].lambda_dist.min() for model in models) - 0.1 l_d_max = max(df[df.model==model].lambda_dist.max() for model in models) + 0.15 dc0_min = min(df[df.model==model].deltacon0.min() for model in models) - 100 dc0_max = max(df[df.model==model].deltacon0.max() for model in models) + 100 p_sp_min = min(df[df.model==model].pgd_spearman.min() for model in models) - 0.1 p_sp_max = max(df[df.model==model].pgd_spearman.max() for model in models) + 0.15 d_min = min(df[df.model==model].degree_cvm.min() for model in models) - 0.1 d_max = max(df[df.model==model].degree_cvm.max() for model in models) + 0.15 fig, axes = plt.subplots(nrows=rows, ncols=cols, sharex=True) print(rows, cols) for i in range(rows): for j in range(cols): #ax = axes[i, j] ax = 
axes[i] metric = metrics[i] model = models[j] filtered_df = df[df.model==model] if i == 0 and j == 0: legend_style = 'brief' else: legend_style = '' sns.lineplot(x='gen_id', y=metric, ax=ax, data=filtered_df, hue='type', marker='o', ci=99, err_style='band', legend=legend_style); if metric == 'node_diff': ax.set_ylim((n_d_min, n_d_max)) elif metric == 'edge_diff': ax.set_ylim((e_d_min, e_d_max)) elif metric == 'lambda_dist': ax.set_ylim((l_d_min, l_d_max)) elif metric == 'deltacon0': ax.set_ylim((dc0_min, dc0_max)) elif metric == 'pgd_spearman': ax.set_ylim((p_sp_min, p_sp_max)) elif metric == 'degree_cvm': ax.set_ylim((d_min, d_max)) if j == 0: ax.set_ylabel(metric) else: ax.set_ylabel('') if i == 0: ax.set_title(model) else: ax.set_title('') if i == rows - 1: ax.set_xlabel('gen_id') else: ax.set_xlabel('') plt.suptitle(f'{graph}', y=1.03); plt.tight_layout() plt.savefig(f'analysis/figures/{graph_name}.pdf', format='pdf', dpi=1000, bbox_inches='tight') # ## plots group_plot(df, graph) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Group ICA: a tutorial # # Author: # # Group ICA extends the celebrated Independent Component Analysis to multiple datasets. # # Single view ICA decomposes a dataset $X$ as $X = S \times A^{\top}$, where $S$ are the independent sources (meaning that the columns of $S$ are independent), and $A$ is the mixing matrix. # # In group ICA, we have several views $Xs = [X_1, \dots, X_n]$. Each view is obtained as # # $$ # Xi \simeq S \times Ai.T # $$ # # so the views share the same sources $S$, but have different mixing matrices $A_i$. # It is a powerful tool for group inference, as it allows to extract signals that are comon across views. import numpy as np import matplotlib.pyplot as plt from mvlearn.decomposition import GroupICA def plot_sources(S): n_samples, n_sources = S.shape fig, axes = plt.subplots(n_sources, 1, figsize=(6, 4), sharex=True) for ax, sig in zip(axes, S.T): ax.plot(sig) # First, let's define some sources: # + np.random.seed(0) n_samples = 2000 time = np.linspace(0, 8, n_samples) s1 = np.sin(2 * time) * np.sin(40 * time) s2 = np.sin(3 * time) ** 5 s3 = np.random.laplace(size=s1.shape) S = np.c_[s1, s2, s3] plot_sources(S) # - # Next, generate some views, which are noisy observations of linear transforms of these sources: n_views = 10 mixings = [np.random.randn(3, 3) for _ in range(n_views)] Xs = [np.dot(S, A.T) + 0.3 * np.random.randn(n_samples, 3) for A in mixings] # We can visualize one dataset: it looks quite messy. plot_sources(Xs[0]) # Next, we can apply group ICA. The option `multiview_output=False` means that we want to recover the estimated sources when we do `.transform`. groupica = GroupICA(multiview_output=False).fit(Xs) # Let's look at what the algorithm estimates: estimated_sources = groupica.transform(Xs) plot_sources(estimated_sources) # Looks pretty good ! We can also wheck that it has correctly predicted each mixing matrix. The estimated mixing matrices are stored in the `.individual_mixing_` attribute. estimated_mixings = groupica.individual_mixing_ # If $\tilde{A}$ is the estimated mixing matrix and $A$ is the true mixing matrix, we can look at $\tilde{A}^{-1}A$. It should be close to a scale and permuation matrix: in this case, the sources are correctly estimated, up to scale and permutation. 
plt.matshow(np.dot(np.linalg.pinv(estimated_mixings[0]), mixings[0])) # A great advantage of groupICA is that it leverages the multiple views to reduce noise. For instance, if only had two views, we would have obtained: estimated_sources = groupica.fit_transform(Xs[:2]) plot_sources(estimated_sources) # Another important property of group ICA is that it can recover signals that are common to all datasets, and separate these signals from the rest. Imagine that we only have one common source across datasets: common_source = S[:, 0] mixings = np.random.randn(n_views, 3) Xs = [a * common_source[:, None] + 0.3 * np.random.randn(n_samples, 3) for a in mixings] estimated_sources = groupica.fit_transform(Xs) plot_sources(estimated_sources) # It recovers the common source on one channel, and the other estimated sources are noise. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from tblfaker import TableFaker # + # Basic usage: get outputs as tuples faker = TableFaker() print("[1]") for row in faker.generate(["name", "address"], rows=4).as_tuple(): print(row) print("\n[2]") for row in faker.generate(["name", "address"], rows=4).as_tuple(): print(row) # + # Basic usage: using a seed to get stable outputs seed = 1 print("[1]") faker = TableFaker(seed=seed) for row in faker.generate(["name", "address"], rows=4).as_tuple(): print(row) print("\n[2]") faker = TableFaker(seed=seed) for row in faker.generate(["name", "address"], rows=4).as_tuple(): print(row) # + # Change locale faker = TableFaker(locale="ja_JP") for row in faker.generate(["name", "address"], rows=4).as_tuple(): print(row) # + # Get outputs as JSON import json faker = TableFaker(seed=1) print(json.dumps(faker.generate(["name", "address"], rows=2, table_name="dict").as_dict(), indent=4)) # + # List providers from tblfaker import get_providers for provider in sorted(get_providers()): print(provider) # + providers = get_providers() - {"zip"} limit = 32 for provider, row in zip(providers, faker.generate(providers, rows=2).transpose().rows): print("{}: {}".format( provider, [value[:limit] if isinstance(value, (str, list)) else str(value)[:limit] for value in row] )) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def gauss_seidel_method(mat_aum, x, e): mat_A = [lin[0:-1] for lin in mat_aum] mat_b = [lin[-1] for lin in mat_aum] iteracoes = 0 erros = [0]*len(x) x_geral = [] x_geral.append(x.copy()) # Primeira iteração é diferente porque precisa ter a primeira matriz erros ainda for i in range(0, len(mat_A)): aii = mat_A[i][i] bi = mat_b[i] soma_parenteses = bi for j in range(0, len(mat_A[i])): if j != i: soma_parenteses -= mat_A[i][j]*x[j] x[i] = (1/aii)*soma_parenteses iteracoes += 1 x_geral.append(x.copy()) for i in range(0, len(x_geral[-1])): erros[i] = abs(x_geral[-1][i] - x_geral[len(x_geral)-2][i]) while max(erros) > e: for i in range(0, len(mat_A)): aii = mat_A[i][i] bi = mat_b[i] soma_parenteses = bi for j in range(0, len(mat_A[i])): if j != i: soma_parenteses -= mat_A[i][j]*x[j] x[i] = (1/aii)*soma_parenteses x_geral.append(x.copy()) for i in range(0, len(x_geral[-1])): erros[i] = abs(x_geral[-1][i] - x_geral[len(x_geral)-2][i]) iteracoes += 1 return (iteracoes, x_geral) def 
sassenfeld_criterion(mat_aum): mat_A = [lin[0:-1] for lin in mat_aum] beta = [] for i in range(0, len(mat_A)): bi = 0 for j in range(0, len(mat_A[i])): if j != i: if len(beta) >= (j+1): bi += abs(mat_A[i][j])*beta[j] else: bi += abs(mat_A[i][j]) beta.append(bi/abs(mat_A[i][i])) if max(beta) < 1: return (True, beta) else: return (False, beta) exemplo_aula = [ [1, 1, 3], [1, -3, -3] ] gauss_seidel_method(exemplo_aula, [0, 0], 0.03) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pytesseract from PIL import Image #image_to_string: pytesseract.pytesseract.tesseract_cmd = r'C:\Users\USER\AppData\Local\Tesseract-OCR\tesseract.exe' #pytesseract.pytesseract.tesseract_cmd = 'C:\Program Files\Tesseract-OCR\tessdata' #pytesseract.pytesseract.tesseract_cmd = 'C:\Program Files\Tesseract-OCR\tesseract.exe' # your path may be different pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' img = Image.open('test1.jpeg') result = pytesseract.image_to_string(img) with open('magic.txt',mode='w') as file: file.write(result) print( 'magic.txt') # - result = pytesseract.image_to_string(img) type(result) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append("..") import spotlob from spotlob.process_opencv import SimpleReader, BinaryThreshold, ContourFinder, GaussianPreprocess, GreyscaleConverter, draw_contours import cv2 import numpy filename = "inner-outer-demo.png" reader = SimpleReader() converter = GreyscaleConverter() preprocessor = GaussianPreprocess(ksize=1) binarizer = BinaryThreshold(threshold=100) s1 = spotlob.Spim.from_file(filename, cached=True).read(reader).convert(converter).preprocess(preprocessor).binarize(binarizer) finder = ContourFinder(mode="all") modes = sorted(finder.parameters[0].options) import matplotlib.pyplot as plt # + nh = int(numpy.ceil(len(modes)/2)) size = 5 fig, axes = plt.subplots(nh,2, figsize=(2*size, size*nh)) for mode, ax in zip(modes, axes.flatten()): finder = ContourFinder(mode=mode) s2 = s1.extract_features(finder) contours = s2.metadata['contours'] cimg = cv2.cvtColor(s2.image, cv2.COLOR_GRAY2RGB) img = draw_contours(cimg, contours, color=(255,0,0), thickness=3) ax.imshow(img) ax.text(2,-10,mode) ax.axis('off') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 64-bit # name: python3 # --- # + import dtale import pandas as pd import warnings warnings.filterwarnings('ignore') data20 = pd.read_csv('data/filtered_2020.csv', header=0, low_memory=False) d = dtale.show(data20) d.open_browser() # + import sweetviz as sv report = sv.analyze(data20) report.show_html() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="XBtZdDTczUAL" # # Introduction # # Theme 2: Healthcare # https://www.kaggle.com/gpreda/covid-world-vaccination-progress # * Q1: Which country has the highest & lowest % of population vaccinated? 
# * Q2: Is there any correlation between the number of population vaccinated vs international borders relaxing? # # Next week, you will present your solution + reasons in a 3-slide presentation to any one of the themes you have chosen above. Maximum presentation time is 5 minutes, tops! # + [markdown] id="1JRA-Lc_1SPP" # # Initialize # + colab={"base_uri": "https://localhost:8080/"} id="nxmMJO8H1UUv" executionInfo={"status": "ok", "timestamp": 1624738682550, "user_tz": -60, "elapsed": 17600, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="ccf02cbd-ba87-457c-a3a8-3c3b12bf3f0e" # Mount the drive folder from google.colab import drive # to load data from google drive drive.mount("/content/drive") # + id="IX-4yBcM1XPb" executionInfo={"status": "ok", "timestamp": 1624738685443, "user_tz": -60, "elapsed": 1101, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} # Load libraries import os # For files operations import urllib.request # For download from url import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # ploting the data import seaborn as sns # ploting the data import csv # to import data in txt files # + id="PKIW_bdB1aJY" executionInfo={"status": "ok", "timestamp": 1624738688054, "user_tz": -60, "elapsed": 180, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} # Set up color blind friendly color palette # The palette with grey: cbPalette = ["#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"] # The palette with black: cbbPalette = ["#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"] # sns.palplot(sns.color_palette(cbPalette)) # sns.palplot(sns.color_palette(cbbPalette)) sns.set_palette(cbPalette) #sns.set_palette(cbbPalette) # + id="l-u6YoBc1cyJ" executionInfo={"status": "ok", "timestamp": 1624738690223, "user_tz": -60, "elapsed": 210, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} # Seaborn favourite plot shortcuts def boxplot(data, x, y, title = ""): """ This function generates a seaborn boxplot with my defaults parameters. 
Parameters: title (string) title of the plot, default is empty data (df) the data frame x (panda serie) the x axis y (panda serie) the y axis """ f, ax = plt.subplots(figsize=(8, 6)) sns.boxplot(x=x, y=y, data=data, notch=True, showmeans=True, meanprops={"marker":"s","markerfacecolor":"white", "markeredgecolor":"black"}) plt.title(title) plt.ioff() def countplot(data, variable, title = ""): """ This function contains my favourite parameters for the seaborn coutplot plot """ f, ax = plt.subplots(figsize=(8, 6)) sns.countplot(data=data, x=variable) plt.title(title) plt.ioff() # + colab={"base_uri": "https://localhost:8080/"} id="BJUxHodQ3soU" executionInfo={"status": "ok", "timestamp": 1624738692874, "user_tz": -60, "elapsed": 557, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="36c2fac4-c077-4230-ba22-aba9eaf044d5" # Set up the path for the data and output folders to the challenge and list files PATH = "/content/drive/MyDrive/Data_science/DSAK" data_dir = PATH + "/Data" output_dir = PATH + "/Output" os.listdir(data_dir) # + [markdown] id="ZcxYvbEj0TVr" # # Q1: Which country has the highest and lowest percentage of population vaccinated? # + [markdown] id="uPcr26ez5s3N" # ## Using the manufacturer dataset # + colab={"base_uri": "https://localhost:8080/"} id="hRkzIhH-32az" executionInfo={"status": "ok", "timestamp": 1624738696234, "user_tz": -60, "elapsed": 474, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="73cea3ea-0890-41d4-d35e-226dcb23df08" file = data_dir + "/" + "country_vaccinations_by_manufacturer.csv" vaccine_manufacturer = pd.read_csv(file, sep = ',', encoding = 'UTF-8') vaccine_manufacturer .info() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="ECpoD_Nq6NLj" executionInfo={"status": "ok", "timestamp": 1624738698024, "user_tz": -60, "elapsed": 304, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="ad89d29c-8b44-4abe-f0ac-22b2e2300e0e" vaccine_manufacturer.head() # + colab={"base_uri": "https://localhost:8080/", "height": 235} id="AUR9ZGBE6SEg" executionInfo={"status": "ok", "timestamp": 1624738700508, "user_tz": -60, "elapsed": 232, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="339c7407-ed85-4236-bbe0-6f24be1eefce" # Compute the total of vaccine for each country by location vaccine_manufacturer .rename(columns={'location': 'Country'}, inplace=True) df = vaccine_manufacturer .groupby(['Country'])[['total_vaccinations']].max() df.head() # + colab={"base_uri": "https://localhost:8080/"} id="j924Bvr46am7" executionInfo={"status": "ok", "timestamp": 1624738702674, "user_tz": -60, "elapsed": 514, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="5a15c9a3-7936-4c79-c255-e4cc4be0f092" # Combine with the world population dataset file = data_dir + "/" + "population_by_country_2020.csv" population = pd.read_csv(file, sep = ',', encoding = 'UTF-8') population.info() # + colab={"base_uri": "https://localhost:8080/", "height": 221} 
id="ouTX5gpw6eCa" executionInfo={"status": "ok", "timestamp": 1624738705153, "user_tz": -60, "elapsed": 277, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="f4fcfcbf-00a3-4585-b97d-3027f380b6fa" population.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="UXSHvHW3BcKn" executionInfo={"status": "ok", "timestamp": 1624738708162, "user_tz": -60, "elapsed": 217, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="950a103c-e4b3-4a36-aa52-4e759cda6e19" df2 = population[['Country (or dependency)', 'Population (2020)', 'Med. Age', 'Urban Pop %', 'World Share']].copy() df2.rename(columns={'Country (or dependency)': 'Country'}, inplace=True) df2.rename(columns={'Population (2020)': 'Population'}, inplace=True) df2.rename(columns={'Med. Age': 'Median_age'}, inplace=True) df2.rename(columns={'Urban Pop %': 'Urban_pop_percentage'}, inplace=True) df2.head() # + colab={"base_uri": "https://localhost:8080/"} id="fwCAacn-b63h" executionInfo={"status": "ok", "timestamp": 1624738710896, "user_tz": -60, "elapsed": 315, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="9b210f9a-e6d2-46ba-c7f6-ed9f14f97a35" vaccine_manufacturer.Country.unique() # + colab={"base_uri": "https://localhost:8080/"} id="1u75S-wkjp4F" executionInfo={"status": "ok", "timestamp": 1624738713060, "user_tz": -60, "elapsed": 199, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="73111af6-1731-4a53-d387-37a8309811d1" vaccine_manufacturer = vaccine_manufacturer.groupby(["Country"])["total_vaccinations"].max() vaccine_manufacturer.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="hPEt53HDE2u3" executionInfo={"status": "ok", "timestamp": 1624738715109, "user_tz": -60, "elapsed": 236, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="1f4aef39-beda-44d5-d6aa-9abde601f76e" # Combine the datasets # The UK was lost in the process. 
I would need to check that the country names
# correspond in both datasets
merged = pd.merge(vaccine_manufacturer, df2, on = "Country")
merged.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="09rQDJ9aTZM5" executionInfo={"status": "ok", "timestamp": 1624738717637, "user_tz": -60, "elapsed": 186, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="02fde68c-9cf1-4821-9248-58c26808a51c"
# Add a new column: percentage of population vaccinated
# percentage_vaccinated = total_vaccinations/Population * 100
merged["percentage_vaccinated"] = merged["total_vaccinations"] / merged["Population"] * 100
merged.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 622} id="Bp3rfp-eqoXG" executionInfo={"status": "ok", "timestamp": 1624738720646, "user_tz": -60, "elapsed": 770, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="fd55923b-8776-421a-8dde-24fcb383aa36"
# Plot percentage of vaccinated people per country
# https://datavizpyr.com/sort-bars-in-barplot-using-seaborn-in-python/
title = 'Percentage of vaccinated people per country'
data = merged.copy()
#data.sort_values(by=['percentage_vaccinated']).reset_index()
y = "Country"
x = "percentage_vaccinated"
f, ax = plt.subplots(figsize=(8, 10))
sns.barplot(x=x, y=y, data=data, color = "#E69F00",
            order=data.sort_values('percentage_vaccinated', ascending=False).Country)
plt.title(title)
plt.ioff()

# + [markdown] id="DxykcN5FygYw"
# After looking closely at the dataset, it appears that many countries, such as the UK, are missing from it.
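# To pin down exactly which country names fail to match, a quick set comparison of
# the two name columns is enough. This is a minimal diagnostic sketch, not part of
# the original analysis; it assumes the `vaccine_manufacturer` series (indexed by
# `Country`) and the `df2` population frame defined above.

# +
manufacturer_countries = set(vaccine_manufacturer.index)
population_countries = set(df2["Country"])
# Countries that will be dropped by the inner merge on "Country"
print("In vaccine data only:", sorted(manufacturer_countries - population_countries))
print("In population data only:", sorted(population_countries - manufacturer_countries))
# -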
# + [markdown] id="kdcCB5WJ50MX" # ## Using the vaccine dataset # + colab={"base_uri": "https://localhost:8080/"} id="ijxCaA5e55sd" executionInfo={"status": "ok", "timestamp": 1624740065256, "user_tz": -60, "elapsed": 630, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="36753c16-2178-49ce-de24-fd0a05c3825d" file = data_dir + "/" + "country_vaccinations.csv" vaccine = pd.read_csv(file, sep = ',', encoding = 'UTF-8') vaccine.info() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="MnLdxaRO6eEm" executionInfo={"status": "ok", "timestamp": 1624740066683, "user_tz": -60, "elapsed": 8, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="cb0f8ab1-2baa-4122-fd66-cfb6e7e401bd" vaccine = vaccine[['country', 'date', 'total_vaccinations', 'people_fully_vaccinated', 'vaccines']] vaccine.rename(columns={'country': 'Country'}, inplace=True) vaccine.head() # + colab={"base_uri": "https://localhost:8080/"} id="sIzIROcB_yf9" executionInfo={"status": "ok", "timestamp": 1624740069717, "user_tz": -60, "elapsed": 4, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="ed6c32ee-4721-4c55-dfcc-2f60a281b1b0" vaccine = vaccine.groupby(["Country"])["people_fully_vaccinated"].max() vaccine.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Im5F_s1F8xGw" executionInfo={"status": "ok", "timestamp": 1624740073234, "user_tz": -60, "elapsed": 925, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="c69bca68-e967-4fb2-87f6-274628a58c98" # Combine the datasets # The UK was lost in the process. 
I would need to check that the country names
# correspond in both datasets
merged2 = pd.merge(vaccine, df2, on = "Country")
merged2.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="CxJm0UO59Igp" executionInfo={"status": "ok", "timestamp": 1624740365182, "user_tz": -60, "elapsed": 213, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="834816e2-0da3-4d1a-a6ed-6947e1207d07"
# Add a new column: percentage of population vaccinated
# percentage_vaccinated = people_fully_vaccinated/Population * 100
merged2["percentage_vaccinated"] = merged2["people_fully_vaccinated"] / merged2["Population"] * 100
merged2.head()

# + id="vVxF0SHrBNRG" executionInfo={"status": "ok", "timestamp": 1624741050657, "user_tz": -60, "elapsed": 216, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}}
# Filter countries with higher coverage
high_coverage = merged2.loc[(merged2['percentage_vaccinated'] >= 40) & (merged2['percentage_vaccinated'] < 100)]
high_coverage = high_coverage.sort_values(by=['percentage_vaccinated'], ascending=False).reset_index()
high_coverage = high_coverage.head(n=20)

# + id="p6xNos6k9K_V" colab={"base_uri": "https://localhost:8080/", "height": 405} executionInfo={"status": "ok", "timestamp": 1624741052193, "user_tz": -60, "elapsed": 311, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="46781ed8-22fe-4757-b540-b4aa975bb982"
# Plot percentage of vaccinated people per country
# https://datavizpyr.com/sort-bars-in-barplot-using-seaborn-in-python/
title = 'Percentage of vaccinated people per country'
data = high_coverage
#data.sort_values(by=['percentage_vaccinated']).reset_index()
y = "Country"
x = "percentage_vaccinated"
f, ax = plt.subplots(figsize=(8, 6))
sns.barplot(x=x, y=y, data=data, color = "#E69F00",
            order=data.sort_values('percentage_vaccinated', ascending=False).Country)
plt.title(title)
plt.ioff()

# + id="UxoNSw9WFilh" executionInfo={"status": "ok", "timestamp": 1624740800583, "user_tz": -60, "elapsed": 307, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}}
# Filter countries with lower coverage
low_coverage = merged2.loc[(merged2['percentage_vaccinated'] < 5)]
low_coverage = low_coverage.sort_values(by=['percentage_vaccinated'], ascending=True).reset_index()
low_coverage = low_coverage.head(n=20)

# + colab={"base_uri": "https://localhost:8080/", "height": 405} id="sL0jKqI8FqGb" executionInfo={"status": "ok", "timestamp": 1624741134228, "user_tz": -60, "elapsed": 228, "user": {"displayName": "", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjK0v36XxtzMAuNrpVnOXhqBpTN8QL_OYny8mh1mw=s64", "userId": "13891238098951500126"}} outputId="8d4dfcbf-c584-4a83-d799-8f8bbf0061e7"
# Plot percentage of vaccinated people per country
# https://datavizpyr.com/sort-bars-in-barplot-using-seaborn-in-python/
title = 'Percentage of vaccinated people per country'
data = low_coverage
y = "Country"
x = "percentage_vaccinated"
f, ax = plt.subplots(figsize=(8, 6))
sns.barplot(x=x, y=y, data=data, color = "#56B4E9",
            order=data.sort_values('percentage_vaccinated', ascending=False).Country)
plt.title(title)
plt.ioff()
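# To answer Q1 directly from the merged table, the rows with the extreme coverage
# values can be pulled out with idxmax/idxmin. A small sketch, assuming the
# `merged2` frame built above with its `percentage_vaccinated` column (rows with
# missing vaccination counts are skipped automatically):

# +
best = merged2.loc[merged2["percentage_vaccinated"].idxmax()]
worst = merged2.loc[merged2["percentage_vaccinated"].idxmin()]
print(f"Highest coverage: {best['Country']} ({best['percentage_vaccinated']:.1f}%)")
print(f"Lowest coverage: {worst['Country']} ({worst['percentage_vaccinated']:.1f}%)")
# -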
# + [markdown] id="nU6h3SA8vzL1" # # Q2: Is there any correlation between the number of population vaccinated vs international borders relaxing? # # For this question we would need a dataset on border relaxing. This is probably hard to find. # # * https://www.kayak.com/travel-restrictions # * https://casinodata.io/ # # The data was manaully collected form casinodata Sat 26 June 21 art 19:02. # + id="zZWCkoAGfsq-" # Combine with the world population dataset file = data_dir + "/" + "Border_status.csv" border = pd.read_csv(file, sep = ',', encoding = 'UTF-8') border.info() # + id="xTPnf7k_iRr7" # Combine the datasets df = pd.merge(merged, border, on = "Country") df.head() # + id="kbfTnykRib2p" # Plot numner of vaccine by borders status title = "Vaccination coverage per border status for tourists" data = df x = "Border_status_tourist" y = "percentage_vaccinated" f, ax = plt.subplots(figsize=(8, 6)) sns.boxplot(x=x, y=y, data=data, showmeans=True, meanprops={"marker":"s","markerfacecolor":"white", "markeredgecolor":"black"}) plt.title(title) plt.ioff() # + id="KDFQF3r1lqhC" # Plot numner of vaccine by borders status title = "Vaccination coverage per border status for residents" data = df x = "Border_status_resident" y = "percentage_vaccinated" f, ax = plt.subplots(figsize=(8, 6)) sns.boxplot(x=x, y=y, data=data, showmeans=True, meanprops={"marker":"s","markerfacecolor":"white", "markeredgecolor":"black"}) plt.title(title) plt.ioff() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # VPC Flow Logs report # Goal: Compare configuration to traffic, harden security groups and NACL's by traffic. 
# # Setup # ## Parameters and AWS session configuration # + from PIL import Image import io import os import time import io import numpy as np import boto3 import os import json import pandas as pd import matplotlib.pyplot as plt import urllib.parse import datetime from boto3.session import Session import ipaddress import configparser pd.options.display.width = 0 vpc_flow_log_bucket = os.environ['S3_FLOW_LOG_BUCKET'] if 'S3_FLOW_LOG_BUCKET' in os.environ else None vpc_flow_log_object_path = os.environ['S3_FLOW_LOG_PATH'] if 'S3_FLOW_LOG_BUCKET' in os.environ else None account_id = os.environ['ACCOUNT_ID'] if 'ACCOUNT_ID' in os.environ else None aws_profile = os.environ['AWS_PROFILE'] if 'AWS_PROFILE' in os.environ else None aws_default_region = os.environ['AWS_DEFAULT_REGION'] if 'AWS_DEFAULT_REGION' in os.environ else None aws_assume_role_profile = os.environ['ASSUME_RULE_PROFILE'] if 'ASSUME_RULE_PROFILE' in os.environ else None aws_role_session_name = os.environ['AWS_ROLE_SESSION_NAME'] if 'AWS_ROLE_SESSION_NAME' in os.environ else 'vpc-flow-log-notebook' aws_shared_cred_file = os.path.expanduser(os.environ['AWS_SHARED_CREDENTIALS_FILE']) if 'AWS_SHARED_CREDENTIALS_FILE' in os.environ\ else os.path.expanduser('~/.aws/credentials') if not account_id or not vpc_flow_log_bucket or not vpc_flow_log_object_path: raise Exception("Not all required envrioment variables are set") use_assume_rule = aws_assume_role_profile is not None if not aws_default_region: os.environ['AWS_DEFAULT_REGION'] = 'us-west-2' if aws_profile: session = boto3.Session(profile_name=aws_profile) else: session = boto3.session.Session() if use_assume_rule: config = configparser.ConfigParser() config.read(aws_shared_cred_file) role_name = config[aws_assume_role_profile]['role_arn'] external_id = config[aws_assume_role_profile]['external_id'] creds = session.client('sts').assume_role(RoleArn=role_name, RoleSessionName=aws_role_session_name,\ ExternalId=external_id)['Credentials'] session = Session(aws_access_key_id=creds['AccessKeyId'], aws_secret_access_key=creds['SecretAccessKey'], aws_session_token=creds['SessionToken']) print("Assumed the role: " + role_name) regions = [region['RegionName'] for region in session.client('ec2').describe_regions()['Regions']] # - im = Image.open("./assets/vpc_dag.png") im # # Configuration research # ## Collect ENI Configuration Data # Rationale: vpc flogs logs are not mapped to security groups def get_eni_configuration(): result = {} enis=[] for region in regions: region_name=region enis=session.client('ec2',region_name=region_name).describe_network_interfaces(MaxResults=1000)['NetworkInterfaces'] for eni in enis: eni_id = eni['NetworkInterfaceId'] for group in eni['Groups']: sg_id = group['GroupId'] record_key = "{}_{}".format(eni_id,sg_id) record=[eni_id,sg_id,eni['PrivateIpAddress'],group['GroupName'],eni['Description'],eni['AvailabilityZone'],eni['VpcId'],region_name] result[record_key] = record return pd.DataFrame.from_dict(result,orient='index', columns=['NetworkInterfaceId','GroupId','PrivateIpAddress','GroupName','eniDescription','AvailabilityZone','VpcId','Region']).sort_values(by=['NetworkInterfaceId']) eni_conf_table = get_eni_configuration() # ## Collect Security Groups Configuration Data # ### Private IP classification im = Image.open("./assets/private_ip_dag.png") im # + def is_in_private_subnet(ip,vpcs_cidrs): for vpc_cidrs_value in vpcs_cidrs.values(): for subnet in vpc_cidrs_value: ip_network = ipaddress.ip_network(ip) subnet_network = ipaddress.ip_network(subnet) if 
(ip_network.overlaps( subnet_network)) : return True return False def cidr_ranges(rule,vpcs_cidrs): IpRanges = [] Ipv6Ranges = [] is_ip_private = True is_ipv6_private = True if len(rule['IpRanges'])>0: for cidr in rule['IpRanges']: cidr_ip = ipaddress.ip_network(cidr['CidrIp']) is_ip_private = cidr_ip.is_private if cidr_ip == ipaddress.ip_network('0.0.0.0/0'): is_ip_private = False elif is_in_private_subnet(cidr_ip,vpcs_cidrs): is_ip_private = True IpRanges.append(cidr_ip) if len(rule['Ipv6Ranges'])>0: for cidr in rule['Ipv6Ranges']: cidr_ip = ipaddress.ip_network(cidr['CidrIpv6']) Ipv6Ranges.append(cidr_ip) is_ipv6_private = cidr_ip.is_private return IpRanges, Ipv6Ranges, is_ip_private, is_ipv6_private # + def add_sg_configuration(eni_conf_table): eni_conf_dict = eni_conf_table.to_dict('r') result = {} vpcs_cidrs = get_vpc_cidrs(eni_conf_table) for eni_conf in eni_conf_dict: try: sg_details = session.resource('ec2',region_name=eni_conf['Region']).SecurityGroup(eni_conf['GroupId']) for rule in sg_details.ip_permissions: internal = False sg_pairs = [] if 'IpProtocol' in rule and rule['IpProtocol']=='-1': allowed_port = 'all' if 'FromPort' in rule: allowed_port = rule['FromPort'] if len(rule['UserIdGroupPairs'])>0: internal = True for pair in rule['UserIdGroupPairs']: sg_pairs.append(pair['GroupId']) if pair['UserId'] != account_id: internal = False IpRanges, Ipv6Ranges, is_ip_private, is_ipv6_private = cidr_ranges(rule,vpcs_cidrs) record_key = "{}_{}_{}".format(eni_conf['NetworkInterfaceId'],eni_conf['GroupId'],allowed_port) record=[eni_conf['NetworkInterfaceId'],eni_conf['GroupId'],allowed_port,internal,eni_conf['PrivateIpAddress'],eni_conf['GroupName'],eni_conf['eniDescription'],eni_conf['AvailabilityZone'],eni_conf['VpcId'],eni_conf['Region'],rule['IpProtocol'],IpRanges,Ipv6Ranges,is_ip_private, is_ipv6_private,sg_pairs] result[record_key] = record except Exception: print(eni_conf['Region'],eni_conf['GroupId'] , Exception) continue return pd.DataFrame.from_dict(result,orient='index', columns=['NetworkInterfaceId','GroupId','Port','IsLimitedToAccount','PrivateIpAddress','GroupName','eniDescription','AvailabilityZone','VpcId','Region','IpProtocol','IpRanges','Ipv6Ranges','is_ip_private', 'is_ipv6_private','sg_pairs']).sort_values(by=['NetworkInterfaceId']) def get_vpc_cidrs(eni_conf_table): vpc_region_dict = eni_conf_table[['VpcId','Region']].to_dict('r') vpc_region_dict = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in vpc_region_dict)] vpc_to_cidrs_dict = {} for vpc_tuple in vpc_region_dict: vpc_cidrs = [] vpc_id = vpc_tuple['VpcId'] vpc_details = session.resource('ec2',region_name=vpc_tuple['Region']).Vpc(vpc_id) vpc_cidrs.append(vpc_details.cidr_block) for cidr_association in vpc_details.cidr_block_association_set: if 'CidrBlock' in cidr_association: vpc_cidrs.append(cidr_association['CidrBlock']) vpc_cidrs = list(set(vpc_cidrs)) vpc_to_cidrs_dict[vpc_id] = vpc_cidrs return vpc_to_cidrs_dict # - # ### List network interface and security groups Details eni_sg_conf=add_sg_configuration(eni_conf_table) # #### Enriched Data eni_sg_conf.sort_values(by=['NetworkInterfaceId']) # ### List public facing security groups public_enis = eni_sg_conf.loc[(eni_sg_conf['is_ip_private']==False)& ( (eni_sg_conf['IsLimitedToAccount']==False))] pd.set_option('display.max_rows', public_enis.shape[0]+1) public_enis.head() unique_public_enis = list(set(public_enis['NetworkInterfaceId'].tolist())) unique_public_ports = list(set(public_enis['Port'].tolist())) unique_public_ip_ranges = 
public_enis['IpRanges'].tolist() unique_public_ipv6_ranges = public_enis['Ipv6Ranges'].tolist() # + public_enis_as_string = ",".join("'" + x + "'" for x in unique_public_enis) public_ports_as_string = ','.join("'" + str(x) + "'" for x in unique_public_ports) print("Public facing ENIs: {}".format(public_enis_as_string)) print("Public facing ports: {}".format(public_ports_as_string)) # - # # Flow research # ## Reduce VPC Data # 1. select fields: account, interfaceid, destinationaddress, destinationport,protocol,action,sourceaddress, sourceport (remove columns: starttime,endtime,numpackets,numbytes,logstatus) # 2. remove duplicated rows (we ignore a lot of columns so it should be significant) # ## Athena configuration session = boto3.Session(profile_name=aws_profile) s3_input = 's3://{}/{}'.format(vpc_flow_log_bucket,vpc_flow_log_object_path) s3_output_bucket_name = '' s3_output_path = 's3://{}'.format(s3_output_bucket_name) database = '' table = '' view_name = 'port_address_view' # + #Function for executing athena queries def run_query(session,query, s3_output, database=None): print("Executing query: {}".format(query)) client = session.client('athena') if (database is None): response = client.start_query_execution( QueryString=query, ResultConfiguration={ 'OutputLocation': s3_output, } ) else: response = client.start_query_execution( QueryString=query, QueryExecutionContext={ 'Database': database }, ResultConfiguration={ 'OutputLocation': s3_output, } ) print('Execution ID: ' + response['QueryExecutionId']) return response def obtain_data(session, filename): try: objectKey = filename + '.csv' print (objectKey) resource = session.resource('s3') response = resource \ .Bucket(s3_output_bucket_name) \ .Object(key= objectKey) \ .get() return pd.read_csv(io.BytesIO(response['Body'].read()), encoding='utf8') except Exception as e: print(e) # Create Athena VPC flow log database and table definition create_database = "CREATE DATABASE IF NOT EXISTS %s;" % (database) # Create VPC flow log table in the created database create_table = \ """CREATE EXTERNAL TABLE IF NOT EXISTS %s.%s ( `version` int, `account` string, `interfaceid` string, `sourceaddress` string, `destinationaddress` string, `sourceport` int, `destinationport` string, `protocol` int, `numpackets` int, `numbytes` bigint, `starttime` int, `endtime` int, `action` string, `logstatus` string ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' LOCATION '%s' TBLPROPERTIES ("skip.header.line.count"="1")""" % ( database, table, s3_input ) # Create the view (interfaceid, destinationport, destinationaddress, numbytes) from the VPC flow log table create_destip_destports_view = \ """ CREATE OR REPLACE VIEW %s AS SELECT interfaceid, destinationport, destinationaddress, numbytes FROM %s WHERE action != 'REJECTED' AND contains (ARRAY[%s], interfaceid) AND contains (ARRAY[%s], destinationport) GROUP BY interfaceid, destinationport,destinationaddress,numbytes""" % (view_name, table, public_enis_as_string, public_ports_as_string) # - # ## Create Athena database, table, and view (ENI, destination port, destination address, numbytes) from VPC flow logs # + # creating the database if not exists create_db_result = run_query(session,create_database, s3_output_path) # Create the flowlogs table combining all collected data from the bucket create_flow_logs_table = run_query(session,create_table, s3_output_path, database) # Create flowlog view to query from create_port_address_view = run_query(session,create_destip_destports_view, s3_output_path, database) 
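
# (Added sketch, not part of the original notebook.) The fixed time.sleep() calls
# below assume the Athena queries finish within a guessed interval. A more robust
# alternative is to poll the query status until it leaves the QUEUED/RUNNING
# states, for example with a small helper like this:
def wait_for_query(session, execution_id, poll_seconds=5):
    """Poll Athena until the given query execution finishes (illustrative helper)."""
    client = session.client('athena')
    while True:
        state = client.get_query_execution(
            QueryExecutionId=execution_id)['QueryExecution']['Status']['State']
        if state in ('SUCCEEDED', 'FAILED', 'CANCELLED'):
            return state
        time.sleep(poll_seconds)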
print(create_port_address_view) time.sleep(30) # - # ## Query the view with AWS Athena and obtain the results from S3 bucket result_file = run_query(session,"SELECT * FROM {}.{}".format(database,view_name),s3_output_path, database) time.sleep(700) #Set according to expected query time, for ~180GB it's ~350 seconds of query time file_name = result_file['QueryExecutionId'] destport_destaddress_table = obtain_data(session, file_name) destport_destaddress_table.head() # ## Flatten CIDRs of ENI records public_enis_flat_cidrs = public_enis.explode('IpRanges').reset_index() public_enis_flat_cidrs['eni_sg_port_ipranges'] = public_enis_flat_cidrs['index'] +'_'+ public_enis_flat_cidrs['IpRanges'].map(str) public_enis_flat_cidrs = public_enis_flat_cidrs.set_index('eni_sg_port_ipranges').drop('index',axis=1) public_enis_flat_cidrs.head() from netaddr import IPNetwork, IPAddress # + total_traffic_bytes = 0 used_enis = [] def compute_cidrs_in_use(eni_id,ipv4_range,ipv6_range): bytes_per_cidr = 0 cidr_in_use = False for _, row in destport_destaddress_table[destport_destaddress_table['interfaceid'] == eni_id].iterrows(): ip = ipaddress.ip_address(row['destinationaddress']) port = int(row['destinationport']) num_bytes = int(row['numbytes']) if ipv4_range: if ip.version == 4 and ip in ipv4_range and port in unique_public_ports: cidr_in_use = True bytes_per_cidr += num_bytes if ipv6_range: if ip.version == 6 and ip in ipv6_range and port in unique_public_ports: cidr_in_use = True bytes_per_cidr += num_bytes return cidr_in_use,bytes_per_cidr def verify_address_port_in_use(eni_record): global total_traffic_bytes global used_enis ipv4_range = eni_record['IpRanges'] ipv6_range = eni_record['Ipv6Ranges'] eni_id = eni_record['NetworkInterfaceId'] print("Verifying ENI {}, CIDR {}".format(eni_id,ipv4_range)) print("Total Bytes collected" , total_traffic_bytes) cidr_in_use,bytes_per_cidr = compute_cidrs_in_use(eni_id,ipv4_range,ipv6_range) print("Is in use: {}, Number of Bytes {}".format(cidr_in_use,bytes_per_cidr)) if cidr_in_use: used_enis.append(True) total_traffic_bytes += bytes_per_cidr print("Total Bytes collected" , total_traffic_bytes) else: used_enis.append(False) # - print("Verifying {} ENIs".format(len(public_enis_flat_cidrs))) public_enis_flat_cidrs.apply(lambda x: verify_address_port_in_use(x),axis=1) public_enis_flat_cidrs['In Use'] = used_enis public_enis_flat_cidrs.head() # ## Summary enis_merge_flow_log = pd.merge(destport_destaddress_table, public_enis, left_on= ['interfaceid', 'destinationport'], right_on= ['NetworkInterfaceId', 'Port'], how = 'outer') # + print("Open ports that still in use: {} results found".format(len(public_enis_flat_cidrs[public_enis_flat_cidrs['In Use'] == True]))) print("Open ports that are not in use: {} results found".format(len(public_enis_flat_cidrs[public_enis_flat_cidrs['In Use'] == False]))) print("Total bytes transffered in used ports {}".format(total_traffic_bytes)) were_in_use = enis_merge_flow_log[['interfaceid', 'destinationport', 'GroupId']] were_in_use = were_in_use.where(were_in_use['GroupId'].isnull()).dropna(how='all') were_in_use_count = len(were_in_use) print ("ENI and ports that were in use but not anymore (closed): {} results found".format(were_in_use_count)) # - # # Past used ENIs and ports # There is evidence in the VPC flow logs that those ENIs and has no security group attached. 
Those ENI's and ports are **not active** in current configuration were_in_use # # Used ports # There is evidence in the VPC flow logs that those ENIs and Ports had active traffic. Those ENI's and ports are **active** in current configuration public_enis_flat_cidrs[public_enis_flat_cidrs['In Use']==True][['NetworkInterfaceId','Port','IpRanges','Ipv6Ranges','In Use']] # # Unused ports # There is **no** evidence in the VPC flow logs that those ENIs and Ports had active traffic. Those ENI's and ports are **not active** in current configuration public_enis_flat_cidrs[public_enis_flat_cidrs['In Use']==False][['NetworkInterfaceId','Port','IpRanges','Ipv6Ranges','In Use']] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbgrader={} # # Optimization Exercise 1 # + [markdown] nbgrader={} # ## Imports # + nbgrader={} # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import scipy.optimize as opt from scipy.optimize import minimize # + [markdown] nbgrader={} # ## Hat potential # + [markdown] nbgrader={} # The following potential is often used in Physics and other fields to describe symmetry breaking and is often known as the "hat potential": # # $$ V(x) = -a x^2 + b x^4 $$ # # Write a function `hat(x,a,b)` that returns the value of this function: # + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true} def hat(x,a,b): return -a*x**2 + b*x**4 # + deletable=false nbgrader={"checksum": "7204bd97cd003430289f171b6ba70d63", "grade": true, "grade_id": "optimizationex01a", "points": 2} assert hat(0.0, 1.0, 1.0)==0.0 assert hat(0.0, 1.0, 1.0)==0.0 assert hat(1.0, 10.0, 1.0)==-9.0 # + [markdown] nbgrader={} # Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$: # + nbgrader={} a = 5.0 b = 1.0 # + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true} x = np.linspace(-3,3,100); plt.figure(figsize=(8,6)) plt.plot(x,hat(x,a,b)); plt.xlabel('x'); plt.ylabel('V(x)'); plt.title('Hat Potential'); plt.tick_params(axis='x',top='off',direction='out'); plt.tick_params(axis='y',right='off',direction='out'); # + deletable=false nbgrader={"checksum": "bd49ce2f030e3366ee640213f26fdaa6", "grade": true, "grade_id": "optimizationex01b", "points": 2} assert True # leave this to grade the plot # + [markdown] nbgrader={} # Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$. # # * Use `scipy.optimize.minimize` to find the minima. You will have to think carefully about how to get this function to find both minima. # * Print the x values of the minima. # * Plot the function as a blue line. # * On the same axes, show the minima as red circles. # * Customize your visualization to make it beatiful and effective. 
# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true} #Finding the Left Minima guess = (-2) results = minimize(hat,guess,args=(a,b),method = 'SLSQP') xL = results.x print("Left Minima: x = " + str(xL[0])) # - #Finding the Right Minima guess = (2) results = minimize(hat,guess,args=(a,b),method = 'SLSQP') xR = results.x print("Right Minima: x = " + str(xR[0])) x = np.linspace(-3,3,100); plt.figure(figsize=(8,6)) plt.plot(x,hat(x,a,b)); plt.xlabel('x'); plt.ylabel('V(x)'); plt.title('Hat Potential with Minimums'); plt.tick_params(axis='x',top='off',direction='out'); plt.tick_params(axis='y',right='off',direction='out'); plt.plot(xL, hat(xL,a,b), marker='o', linestyle='',color='red'); plt.plot(xR, hat(xR,a,b), marker='o', linestyle='',color='red'); # + deletable=false nbgrader={"checksum": "235361d4c954cf9fd6a8ecef309b3a44", "grade": true, "grade_id": "optimizationex01c", "points": 4} assert True # leave this for grading the plot # + [markdown] nbgrader={} # To check your numerical results, find the locations of the minima analytically. Show and describe the steps in your derivation using LaTeX equations. Evaluate the location of the minima using the above parameters. # + [markdown] deletable=false nbgrader={"checksum": "d7d37614ffa0d469a42ff3fd121335f2", "grade": true, "grade_id": "optimizationex01d", "points": 2, "solution": true} # $$ V(x) = -a x^2 + b x^4 $$ # # To Find the minima we set $\frac{\partial V(x)}{\partial x}$ = 0 # # $$ \frac{\partial V(x)}{\partial x} = -2ax + 4bx^3 = 0 $$ # # Divide by x on both sides. We know x = 0 is not a minimum. It is a local maximum. # # $$ \frac{\partial V(x)}{\partial x} = -2a + 4bx^2 = 0 $$ # # $$ 2a = 4bx^2 $$ # # $$ x^2 = \frac{2a}{4b} $$ # # $$ x = \pm\sqrt{\frac{2a}{4b}} $$ # # $$ x = \pm \sqrt{\frac{5}{2}} = \pm 1.58114 $$ # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Numpy # Numpy is used by most scientific packages in Python # Numpy provides a matrix class # + import numpy as np #Arrays y = np.array([1,3,2]) print('y: ', y) print('shape: ', y.shape) # + import numpy as np #matrix: 2D array mat = np.matrix('1 2; 3 4') print(mat) # - # Reshape function import numpy as np A = np.array([1,1,2,3,5,8,13,21,34]).reshape(3,3) print(A) print('shape: ', A.shape) # Zeros function import numpy as np zero_mat = np.zeros((3,4),dtype=int) print(zero_mat) # Arrange function # + import numpy as np B = np.arange(0,16,3) print(B) B = B.reshape(3,2) print(B) # - # # Arithmetic operations: # + import numpy as np A = np.array([1,1,2,3,5,8,13,21,34]).reshape(3,3) print(A) print(A * 2) print(A / 2) # + import numpy as np #Matrix multiplication A = np.array([1,1,2,3,5,8,13,21,34]).reshape(3,3) print(A) print(A*A) y = np. array([1, 3, 2]) print(A*y) # + #Numpy operations are element wise operations. 
A = np.array([1,1,2,3,5,8,13,21,34]).reshape(3,3)
print(A)
# Exponentiation (element-wise)
print(A**2)
# dot function: true matrix multiplication
print(np.dot(A,A))

# +
# Inner product
x = np.array([1,3,2])[:,None]
z = np.array([3,4,5])[:,None]
print(np.sum(x*z))
# Transpose
print(z.T)
# Outer product
print(np.dot(x,z.T))
# Element-wise product
print(x*z)
# -

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc=true
#

# Table of Contents
# # - # # Introduction # # This notebook reads the files produced by level2_cartopy_resample and plots # them on a map. # # It introduces a new functions to read the image and the area_def # # # Setup # # 1. Run level2_cartopy_resample # # 1. Run the following test script # ``` # python -m a301.install_tests.assign8_test # ``` # to check file locations. # # + from matplotlib import cm import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import Normalize from IPython.display import Image,display from pyresample import geometry import pdb #Image('figures/MYBRGB.A2016224.2100.006.2016237025650.jpg',width=600) # - # %matplotlib inline from matplotlib import cm from a301.scripts.modismeta_read import parseMeta import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import Normalize from IPython.display import Image,display import a301 from pathlib import Path from pyhdf.SD import SD, SDC import pprint import json # # Read in the 1km and 5km water vapor images # # Use the two helper functions below to get the area_def and image # + def area_def_from_dict(area_def_dict): """ given an dictionary produced by dump_area_def return a pyresample area_def Parameters ---------- area_def_dict: dict dictionary containing area_def parameters Returns ------- pyresample area_def object """ keys=['area_id','proj_id','name','proj_dict','x_size','y_size','area_extent'] arglist=[area_def_dict[key] for key in keys] area_def=geometry.AreaDefinition(*arglist) return area_def def get_image(foldername,image_array_name): """ write an image plus mmetadata to a folder under a301.map_dir Parameters ---------- foldername: Path object or string the path to the folder that holds the image files image_array_name: str the root name for the npz and json files i.e. 
image.npz and image.json Returns: image_array: ndarray with the image area_def: pyresample area_def for image """ image_file=Path(foldername) / Path(image_array_name + '.npz') image_array = np.load(image_file)[image_array_name] json_file = foldername / Path(image_array_name + '.json') with open(json_file,'r') as f: meta_dict=json.load(f) area_def = area_def_from_dict(meta_dict['area_def']) return image_array, area_def # - import cartopy def plot_image(resampled_image,area_def,vmin=0.,vmax=4.,palette='plasma'): """ Make a cartopy plot of an image Parameters ---------- resampled_image: ndarray 2-dimensional image that has be resampled onto an xy grid area_def: pyresample area_def objet the area_def that was used by pyresample vmin,vmax: floats upper and lower limits for the color map palette: str or matplotlib colormap colormap to use for plot Returns ------- fig,ax: matmplotlib figure and axis objects """ if isinstance(palette,str): pal = plt.get_cmap(palette) else: pal = palette pal.set_bad('0.75') #75% grey for out-of-map cells pal.set_over('r') #color cells > vmax red pal.set_under('k') #color cells < vmin black from matplotlib.colors import Normalize the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False) crs = area_def.to_cartopy_crs() fig, ax = plt.subplots(1, 1, figsize=(10,10), subplot_kw={'projection': crs}) ax.gridlines(linewidth=2) ax.add_feature(cartopy.feature.GSHHSFeature(scale='coarse', levels=[1,2,3])); ax.set_extent(crs.bounds,crs) cs=ax.imshow(resampled_image, transform=crs, extent=crs.bounds, origin='upper',alpha=0.8,cmap=pal,norm=the_norm) fig.colorbar(cs,extend='both') return fig, ax foldername=a301.map_dir / Path('wv_maps') image_wv_ir, area_def_lr = get_image(foldername, 'wv_ir') fig,ax=plot_image(image_wv_ir, area_def_lr) ax.set_title('5 km IR water vapor (cm)'); image_wv_nearir_lr, area_def_hr = get_image(foldername, 'wv_nearir_lr') fig,ax=plot_image(image_wv_nearir_lr, area_def_hr) ax.set_title('1 km IR water vapor (cm) at 5k low resolution (lr)'); print(image_wv_nearir_lr.shape) image_wv_nearir_hr, area_def_hr = get_image(foldername, 'wv_nearir_hr') fig,ax=plot_image(image_wv_nearir_hr, area_def_hr) ax.set_title('1 km IR water vapor high resolution (cm)'); print(image_wv_nearir_hr.shape) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import sklearn.model_selection import sklearn.linear_model # %matplotlib inline # - data = np.loadtxt("notas_andes.dat", skiprows=1) # + Y = data[:,4] X = data[:,:4] X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.3) print(np.shape(Y_train), np.shape(X_train)) # - regresion = sklearn.linear_model.LinearRegression() regresion.fit(X_train, Y_train) print(regresion.coef_, regresion.intercept_) print(regresion.score(X_train,Y_train), regresion.score(X_test,Y_test)) # + plt.figure(figsize=(8,8)) labels = ["Fisica I", "Fisica II", "Alg Lin", "Calc Dif"] for i, label in enumerate(labels): plt.subplot(2,2,i+1) plt.scatter(X_train[:,i], Y_train) plt.scatter(X_train[:,i], regresion.predict(X_train), marker='^') plt.xlabel(label) plt.ylabel('PGA') # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''myenv'': conda)' # name: python3 # 
--- # # The Pumpkin Market # #### Question # Predict the price of a pumpkin for a sale during a given month import pandas as pd pumpkins = pd.read_csv('../data/US-pumpkins.csv') pumpkins.head() # the last 5 rows of data pumpkins.tail() # check missing data in the current dataframe pumpkins.isnull().sum() # the packaging does not has a consistent measurement # so we are adding filter to our dataset pumpkins = pumpkins[pumpkins['Package'].str.contains('bushel', case=True, regex=True)] print(pumpkins) # we might want to drop some data to make it easier to work with new_coloumns = ['Package' , 'Month' , 'Low Price' , 'High Price' , 'Date'] # these are the columns that we want to keep pumpkins = pumpkins.drop([c for c in pumpkins.columns if c not in new_coloumns], axis=1) price = (pumpkins['Low Price'] + pumpkins['High Price']) / 2 month = pd.DatetimeIndex(pumpkins['Date']).month print(month) new_pumpkins = pd.DataFrame({'Month': month, 'Package': pumpkins['Package'], 'Low Price': pumpkins['Low Price'],'High Price': pumpkins['High Price'], 'Price': price}) new_pumpkins.head() print(new_pumpkins) #standardize the pricing per bushel new_pumpkins.loc[new_pumpkins['Package'].str.contains('1 1/9') , 'Price'] = price/ ( 1 + 1/9) new_pumpkins.loc[new_pumpkins['Package'].str.contains('1/2') , 'Price'] = price/ ( 1/2) print(new_pumpkins) # create the visualization import matplotlib.pyplot as plt price = new_pumpkins.Price month = new_pumpkins.Month plt.scatter(price, month) plt.show() new_pumpkins.groupby(['Month'])['Price'].mean().plot(kind='bar') plt.ylabel("Pumpkin Price") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Datasets - Reduced data, IRFs, models # # ## Introduction # # `gammapy.datasets` are a crucial part of the gammapy API. `datasets` constitute `DL4` data - binned counts, IRFs, models and the associated likelihoods. `Datasets` from the end product of the `makers` stage, see [makers notebook](makers.ipynb), and are passed on to the `Fit` or estimator classes for modelling and fitting purposes. # # To find the different types of `Dataset` that are supported see [Datasets home](../../datasets/index.rst#Types-of-supported-datasets) # # # ## Setup import numpy as np import astropy.units as u from astropy.time import Time from regions import CircleSkyRegion from astropy.coordinates import SkyCoord from gammapy.datasets import ( MapDataset, SpectrumDataset, Datasets, FluxPointsDataset, ) from gammapy.data import DataStore from gammapy.maps import WcsGeom, RegionGeom, MapAxes, MapAxis, Map from gammapy.modeling.models import SkyModel, PowerLawSpectralModel from gammapy.estimators import FluxPoints # ## MapDataset # # The counts, exposure, background, masks, and IRF maps are bundled together in a data structure named `MapDataset`. While the `counts`, and `background` maps are binned in reconstructed energy and must have the same geometry, the IRF maps can have a different spatial (coarsely binned and larger) geometry and spectral range (binned in true energies). It is usually recommened that the true energy bin should be larger and more finely sampled and the reco energy bin. # # ### Creating an empty dataset # # An empty `MapDataset` can be instantiated from any `WcsGeom` object. Binnings of the all IRF axes can be individually configued, otherwise, internal defaults will be selected. 
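# As a minimal illustration of the default path (a sketch, not from the original
# notebook): if only a geometry is passed, `MapDataset.create` fills in default
# IRF axes itself. The axis and sky-position choices below are arbitrary.

# +
energy_axis_demo = MapAxis.from_energy_bounds(1, 10, nbin=5, unit="TeV", name="energy")
geom_demo = WcsGeom.create(
    skydir=(83.63, 22.01), width=2 * u.deg, binsz=0.1 * u.deg, frame="icrs", axes=[energy_axis_demo]
)
dataset_defaults = MapDataset.create(geom_demo)
print(dataset_defaults)
# -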
# + energy_axis = MapAxis.from_energy_bounds( 1, 10, nbin=11, name="energy", unit="TeV" ) geom = WcsGeom.create( skydir=(83.63, 22.01), axes=[energy_axis], width=5 * u.deg, binsz=0.05 * u.deg, frame="icrs", ) energy_axis_true = MapAxis.from_energy_bounds( 0.1, 100, nbin=11, name="energy_true", unit="TeV", per_decade=True ) rad_axis = MapAxis.from_bounds(0, 5, nbin=50, unit="deg", name="rad") dataset_empty = MapDataset.create( geom=geom, energy_axis_true=energy_axis_true, rad_axis=rad_axis, binsz_irf=0.1, ) # - dataset_empty.edisp # To see the geometry of each map, we can dataset_empty.geoms # To see how to use the `dataset_empty` in the data reduction process, please see the [makers notebook](makers.ipynb) # ### Reading and write datasets # # Datasets can be read from and saved to disk using the `read` and write commands. This saves the various `Map` attributes of the dataset as different HDUs of a single FITS file. The Maps are currently stored according to the [gadf specifications for skymaps](https://gamma-astro-data-formats.readthedocs.io/en/latest/skymaps/index.html) dataset = MapDataset.read( "$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz", name="test" ) # **Note**: The dataset name is a very important attribute. They act as unique identifier for a dataset within `datasets`. No two datasets can have the same name. Models are linked to datasets through the dataset name. See, [model management](model_management.ipynb) for details # ## Accessing contents of a dataset # To explore the contents of a `Dataset`, you can simply print(dataset) # For a quick info, use dataset.info_dict() # To access the individual components of a dataset, eg background, you can simply dataset.background # `Dataset.background` contains the background map computed from the IRF. # To see the model corrected background, use `dataset.npred_background()`. # To compute the predicted counts from a particluar source model, use `dataset.npred_signal(model_name)` # # *Note* - The reduced IRFs, counts, backgrounds and the model predicted counts, ie, npred(), are all stored as `maps` on a dataset. Standard `Map` operations can be performed on these, eg, see: [maps notebook](maps.ipynb). The `psf` and `edisp` are stored as `~gammapy.irf.PSFKernelMap` and `~gammapy.irf.EDispKernelMap`, respectively, see the associted documentation for further details. # ### Using masks # # There are two masks that can be set on a `Dataset`, `mask_safe` and `mask_fit`. # # - The `mask_safe` is computed during the data reduction process according to the specified selection cuts, and should not be changed by the user. # - During modelling and fitting, the user might want to additionally ignore some parts of a reduced dataset, e.g. to restrict the fit to a specific energy range or to ignore parts of the region of interest. This should be done by applying the `mask_fit`. To see details of applying masks, please refer to [Masks-for-fitting](mask_maps.ipynb#Masks-for-fitting:-mask_fit) # # Both the `mask_fit` and `mask_safe` must have the safe `geom` as the `counts` and `background` maps. 
# eg: to see the safe data range dataset.mask_safe.plot_interactive(add_cbar=True); # To apply a mask fit - in enegy and space region = CircleSkyRegion( SkyCoord(2.1, 1.5, unit="deg", frame="galactic"), 0.7 * u.deg ) mask_space = dataset.geoms["geom"].region_mask([region], inside=False) mask_energy = dataset.geoms["geom"].energy_mask(0.6 * u.TeV, 4 * u.TeV) mask = mask_space & mask_energy # standard binary operations allowed on masks dataset.mask_fit = mask dataset.mask_fit.plot_grid(); # To see the allowed energy ranges, you can use # - `dataset.energy_range_safe` : energy range allowed by the `mask_safe` # - `dataset.energy_range_fit` : energy range allowed by the `mask_fit` # - `dataset.energy_range` : the final energy range used in likelihood computation # # These methods return two maps, with the `min` and `max` energy values at each spatial pixel dataset.energy_range # To see the lower energy threshold at each point dataset.energy_range[0].plot(add_cbar=True) # ### Downsampling datasets # # It can often be useful to coarsely rebin an initially computed datasets by a specfied factor. The number of counts. are preserved. By default only spatial axes are downsampled, but additional axes can be specified, eg downsampled_dataset = dataset.downsample( factor=10, axis_name="energy", name="downsampled_dataset" ) print(downsampled_dataset, dataset) # ## SpectrumDataset # # `SpectrumDataset` inherits from a `MapDataset`, and is specially adapted for 1D spectral analysis, and uses a `RegionGeom` instead of a `WcsGeom`. # A `MapDatset` can be converted to a `SpectrumDataset`, by summing the `counts` and `background` inside the `on_region`, which can then be used for classical spectral analysis. Containment correction is feasible only for circular regions. # + nbsphinx-thumbnail={"tooltip": "Datasets API notebook"} on_region = CircleSkyRegion( SkyCoord(0, 0, unit="deg", frame="galactic"), 0.5 * u.deg ) spectrum_dataset = dataset.to_spectrum_dataset( on_region, containment_correction=True ) # - # For a quick look spectrum_dataset.peek(); # A `MapDataset` can also be integrated over the `on_region` to create a `MapDataset` with a `RegionGeom`. Complex regions can be handled and since the full IRFs are used, containment correction is not required. reg_dataset = dataset.to_region_map_dataset(on_region, name="RegionMapDS") print(reg_dataset) # ## FluxPointsDataset # # `FluxPointsDataset` is a `Dataset` container for precomputed flux points, which can be then used in fitting. # `FluxPointsDataset` cannot be read directly, but should be read through `FluxPoints`, with an additional `SkyModel`. Similarly, `FluxPointsDataset.write` only saves the `data` component to disk. flux_points = FluxPoints.read( "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits" ) model = SkyModel(spectral_model=PowerLawSpectralModel()) fp_dataset = FluxPointsDataset(data=flux_points, models=model) # The masks on `FluxPointsDataset` are `np.array` and the data is a `FluxPoints` object. The `mask_safe`, by default, masks the upper limit points fp_dataset.mask_safe # Note: the mask here is simply a numpy array fp_dataset.data # is a `FluxPoints` object fp_dataset.data_shape() # number of data points # For an example of fitting `FluxPoints`, see [flux point fitting](../analysis/1D/sed_fitting), and can be used for catalog objects, eg see [catalog notebook](catalog.ipynb) # ## Datasets # `Datasets` are a collection of `Dataset` objects. 
They can be of the same type, or of different types, eg: mix of `FluxPointDataset`, `MapDataset` and `SpectrumDataset`. # # For modelling and fitting of a list of `Dataset` objects, you can either # - Do a joint fitting of all the datasets together # - Stack the datasets together, and then fit them. # # `Datasets` is a convenient tool to handle joint fitting of simlutaneous datasets. As an example, please see the [joint fitting tutorial](../3D/analysis_mwl.ipynb) # # To see how stacking is performed, please see [Implementation of stacking](../../datasets/index.html#stacking-multiple-datasets) # # To create a `Datasets` object, pass a list of `Dataset` on init, eg # Create some dummy datasets for example purposes dataset1 = dataset.copy(name="dataset1") dataset2 = dataset.copy(name="dataset2") dataset3 = dataset.copy(name="dataset3") datasets = Datasets([dataset1, dataset2, dataset3]) datasets.info_table() # quick info of all datasets datasets.names # unique name of each dataset # Normal list operations work on `Datasets`, so dataset0 = datasets[0] # extracts the first dataset # To select certain datasets within a given time interval, pass `astropy.time.Time` objects to `Datasets.select_time()` datasets_sub = datasets.select_time( time_min=Time(51544, format="mjd"), time_max=Time(51554, format="mjd") ) # If all the datasets have equivalent geometries, they can be stacked together stacked = datasets.stack_reduce(name="stacked") print(stacked) # Use python list convention to remove/add datasets, eg: datasets.remove("dataset2") datasets.names datasets.append(dataset2) datasets.names # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from base58 import b58decode, b58encode from solana.message import * from solana.blockhash import Blockhash from solana.publickey import PublicKey from solana.transaction import Transaction, SIG_LENGTH def print_message(msg): print("Header: ", msg.header) print("Recent Blockhash: ", b58decode(msg.recent_blockhash).hex()) any((print(f"key{i + 1}: {bytes(key)}") for i, key in enumerate(msg.account_keys))) print("Instructions: ", msg.instructions) # - message_header = MessageHeader(0, 3, 2) accounts_keys = [str(PublicKey(i)) for i in range(1, 6)] recent_blockhash = Blockhash(str(PublicKey(1))) instructions = [CompiledInstruction(accounts=[1,2,3], data=b58encode(bytes([9] * 5)), program_id_index=4)] args = MessageArgs(message_header, accounts_keys, recent_blockhash, instructions) msg = Message(args) msg_bytes = msg.serialize() msg_bytes decoded_msg = msg.deserialize(msg_bytes) assert decoded_msg.header == message_header assert decoded_msg.recent_blockhash == recent_blockhash print_message(decoded_msg) signatures = [ b58encode(bytes([1] * SIG_LENGTH)), b58encode(bytes([2] * SIG_LENGTH)), ]; # Note: Signatures will be added to transaction on each run tx = Transaction.populate(msg, signatures) tx_msg = tx.compile_message() assert tx.serialize_message() == tx_msg.serialize() assert len(tx_msg.instructions) == len(tx.instructions) == 1 assert tx_msg.recent_blockhash == tx.recent_blockhash == recent_blockhash print_message(tx_msg) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Section III: Analysis # # In this notebook the 
trajectory produced in the simulation section is analyzed. The following parameters are calculated: # - Area per lipid (APL) # - Tilt Angle # - Nematic Order Parameter (S2) # - Neutron Scattering Length Density (NSLD) # - Bilayer Height # Load the final trajectory into MDTraj and ensure that the correct number of frames (2001), atoms (15288), and residues (2952) are present. # + from copy import deepcopy import numpy as np import pandas as pd import mdtraj as md import matplotlib.pyplot as plt from scipy.signal import find_peaks traj = md.load("../simulation/5_prod/traj_wrapped.xtc", top="../simulation/5_prod/confout.gro") print(traj) # - # ## Area Per Lipid # # The area per lipid is calculated as the cross sectional area divided by the number of lipids for each leaflet. # + # Get box area: Lx * Ly area = traj.unitcell_lengths[:,0] * traj.unitcell_lengths[:,1] # Specify the number of lipids and number of leaflets in the system number_of_lipids = 72 number_of_leaflets = 2 # Calculate the area per lipid: the area divided by the number of lipids per leaflet apl = area / (number_of_lipids / number_of_leaflets) # Determine the average area per lipid and convert to square angstroms apl_mean = np.mean(apl) * 100 print(f"The average area per lipid is {apl_mean:.2f} Å^2") # - # ## Tilt Angle # # The tilt angle is the angle between the lipid tail director and the bilayer normal. See [Moore, et al, 2018](https://doi.org/10.1016/j.bpj.2017.10.031) for more details. # + # Dictionary defining the tail indices for each lipid type tail_indices = {"cer" : tuple([np.arange(0, 74), # Sphingosine tail np.arange(74, 129)]), # Fatty Acid Tail "ffa" : tuple([np.arange(0, 74)]), # Whole molecule "chol" : tuple([np.arange(0, 74)]) # Whole molecule } # Inititalize list of tails and populate with indices of each tail tails = [] for residue in traj.top.residues: if residue.name == "tip3p": continue residue_tails = tail_indices[residue.name] residue_indices = np.array([atom.index for atom in residue.atoms]) for tail in residue_tails: tails.append(list(residue_indices.take(tail))) # Ensure that each element in the list of indices is an int for i in range(len(tails)): for j in range(len(tails[i])): tails[i][j] = int(tails[i][j]) # Compute the directors for each tail and reshape to make an (:,3) shape list. directors = md.compute_directors(traj=traj, indices=tails)#.reshape(-1, 3) # Compute the angle between the bilayer normal vector ([0, 0, +/- 1]) and the director dot_product = np.dot(directors, np.array([0, 0, 1])) angles = np.arccos(dot_product) * 180.0 / np.pi # Make sure that angles are below 90 degrees angles = angles + (angles > 90) * (180 - 2*angles) # Get the average tilt angle over all lipids over all frames angles_mean = np.mean(angles) print(f"The average tilt angle is {angles_mean:.1f}°") # - # ## Nematic Order Parameter # # The nematic order parameter is calculated by determining the largest eigenvalue of the Q-tensor. See [Moore, et al, 2018](https://doi.org/10.1016/j.bpj.2017.10.031) for more details. # + # We can use the same definition of the tail indices as in the tilt angle calculation (see above) # Compute the nematic order using MDTraj s2 = md.compute_nematic_order(traj=traj, indices=tails) # Get the average nematic order over all frames s2_mean = np.mean(s2) print(f"The average nematic order is {s2_mean:.4f}") # - # ## Neutron Scattering Length Density # # The NSLD is calculated by plotting a z-profile histogram of the system weighted by the scattering length. 
This is normalized by the volume of the slice. Scattering lengths for each atom/isotope are found here: https://www.ncnr.nist.gov/resources/n-lengths/ # Define scattering lengths for each element scattering_length = {"hydrogen" : -3.74, "deuterium" : 6.671, "carbon" : 6.6511, "nitrogen" : 9.37, "oxygen" : 5.803 } # The total neutron scattering length density is calculated and plotted below. Reference data from , , , , , , , *Biophysical Journal*, **100**(6), 2011, p1481-1489, https://doi.org/10.1016/j.bpj.2011.02.001. # + # Get a list of scattering lengths for each index scattering_lengths = [scattering_length[atom.element.name] for atom in traj.top.atoms] # Compute a histogram of the total scattering length vs the z-coordinate. nsld, edges = np.histogram(traj.xyz[:,:,2].reshape(-1), range=[3,np.mean(traj.unitcell_lengths[:,2])-3], bins=200, weights=np.array(scattering_lengths*traj.n_frames)) bins = (edges[1:] + edges[:-1]) * 0.5 # Divide by number of frames nsld /= traj.n_frames # Divide by bin width and box area to get density nsld /= (bins[1]-bins[0]) * np.mean(traj.unitcell_lengths[:,0] * traj.unitcell_lengths[:,1]) # Move bins to center bins -= np.mean(bins) # Convert to angstroms bins *= 10 nsld *= 0.01 # Plot the simulation scattering length density plt.figure(dpi=350, figsize=[5, 3]) plt.plot(bins, nsld, '-r', label="Simulated") # Load in reference data and rescale to match simulated NSLD reference_nsld = np.loadtxt("ref/experimental_nsld.txt") reference_nsld[:,1] *= (np.max(nsld) - np.min(nsld)) / (np.max(reference_nsld[:,1]) - np.min(reference_nsld[:,1])) reference_nsld[:,1] += np.min(nsld) - np.min(reference_nsld[:,1]) # Plot reference experimental data plt.plot(reference_nsld[:,0], reference_nsld[:,1], '--k', label="Experimental") plt.xlabel("z_coordinate (Å)") plt.ylabel("NSLD (A.u.)") plt.xlim((-30, 30)) plt.legend() # - # In addition, the deuterated profiles for each lipid type (assuming that all hydrogens in the molecule are deuterated) are also plotted. # + def plot_deuterated_NSLD(profile_name, deuteriums): """Worker function to calculate deuterated profile and plot to a matplotlib Figure. 
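
    Parameters
    ----------
    profile_name : str
        Label used for this profile in the plot legend ("Deuterated {profile_name}").
    deuteriums : iterable of int
        Atom indices whose scattering length is replaced by the deuterium value
        before the z-profile histogram is recomputed.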
""" scattering_lengths_deuterated = deepcopy(scattering_lengths) for deuterium in deuteriums: scattering_lengths_deuterated[deuterium] = scattering_length["deuterium"] nsld_deuterated, edges = np.histogram(traj.xyz[:,:,2].reshape(-1), range=[2,np.mean(traj.unitcell_lengths[:,2])-2], bins=200, weights=np.array(scattering_lengths_deuterated*traj.n_frames)) bins = (edges[1:] + edges[:-1]) * 0.5 bins -= np.mean(bins) nsld_deuterated /= traj.n_frames nsld_deuterated /= (bins[1]-bins[0]) * np.mean(traj.unitcell_lengths[:,0] * traj.unitcell_lengths[:,1]) # Subtract the protonated NSLD from the deuterated nsld_deuterated = nsld_deuterated - nsld plt.plot(bins*10, nsld_deuterated*0.01, label=f"Deuterated {profile_name}") # Repeat for each lipid type with deuteration plt.figure(dpi=350, figsize=[5, 3]) for lipid_name in ("cer", "chol", "ffa"): if lipid_name == "cer": # Sphingosine tails for tail_number, tail_name in enumerate(["cer fa.", "cer sph."]): deuteriums = [] for residue in traj.top.residues: if residue.name in {"tip3p", "chol", "ffa"}: continue residue_tails = tail_indices[residue.name] residue_indices = np.array([atom.index for atom in residue.atoms]) deuteriums.append(residue_indices.take(residue_tails[tail_number])) deuteriums = np.array(deuteriums).reshape(-1) plot_deuterated_NSLD(tail_name, deuteriums) else: deuteriums = traj.top.select(f"resname {lipid_name} and element H") plot_deuterated_NSLD(lipid_name, deuteriums) plt.legend(ncol=2, bbox_to_anchor=(0.95, -0.2)) plt.xlabel("z_coordinate (Å)") plt.ylabel("NSLD (A.u.)") # - # Finally, the bilayer height calculated by measuring the distance between the two peaks of the NSLD, which correspond to the location of the location head groups. # + # Find peaks in the NSLD using the find_peaks function in the scipy package peaks, properties = find_peaks(nsld, height=0.8, distance=50) # Make sure that there are only 2 peaks found. # If there are more or less, there is something wrong with the trajectory assert len(peaks) == 2 # Calculate the height (difference z-positions). height = (bins[peaks[1]] - bins[peaks[0]]) print(f"The average bilayer height is {height:.2f} Å") # - # A set of reference data from a previously run notebook is saved in the `ref/` directory. Verify that the structural metrics obtained in *your* simulation (row 1) match these results (row 0). # + # Create pandas dataframe with your calculated structral parameters. your_data = {'height (Å)': [height], 'apl (Å^2)': [apl_mean], 'tilt_angle (°)': [angles_mean], 's2' : [s2_mean]} your_data = pd.DataFrame.from_dict(your_data) # Load reference data reference_data = pd.read_csv("ref/reference_structure.csv") comparison = pd.concat((reference_data, your_data), ignore_index=True) # Print the concatenated table comparison # - # Print the difference between rows to see how far you were off comparison.diff() # ## Discussion # # The simulated protonated NSLD had the same general shape as the experimental NSLD with a small peak at the center of the bilayer, which represents the interdigitation of the lipid tails. However, the locations of large peaks representing the headgroup region and of the "shoulders" at ±15 Å of the experimental NSLD are not replicated in the simulated NSLD. 
To capture these details, one would need to simulate a more complex system containing a mixture of ceramide types and lipid tail lengths with a composition similar to the experimental mixture, and implement a more effective annealing technique (such as RWMD) to ensure fully decorrelated headgroup positions [(see Moore et al. 2018)](https://doi.org/10.1016/j.bpj.2017.10.031). # # According to the deuterated NSLD plots, we can see that there is significant interdigitation of the ceramide fatty acid and free fatty acid tails, which both contain 24 carbons, indicated by the large high-density peak at the center of the bilayer. The shorter 18-carbon sphingosine chain has a low-density trough at the center of the bilayer similar to the behavior of cholesterol, which suggests that these lipids do not interdigitate. It is also evident that the cholesterol molecules are located closer to the center of the bilayer than the other lipids. # # This experiment can be further expanded by screening several compositions or water content to examine the effect of varying these parameters on the structure of the bilayer. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## ML Assignment 3 # ### Team Members # #### 1. (2175052) # #### 2. (2175048) # # ### Question: # #### To do feature selection using Information gain and Forward Selection, and classify using SVC using rbf, poly and linear kernel import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset = pd.read_excel('Immunotherapy.xlsx') X = dataset.drop(['Result_of_Treatment'], axis=1) y = dataset['Result_of_Treatment'] print(dataset.head()) # #### Information Gain Feature Selection from sklearn.feature_selection import mutual_info_classif from sklearn.feature_selection import SelectKBest, SelectPercentile mutual_info = mutual_info_classif(X.fillna(0), y) mi_series = pd.Series(mutual_info) mi_series.index = X.columns mi_series.sort_values(ascending=False) mi_series.sort_values(ascending=False).plot.bar(figsize=(10,4)) k_best_features = SelectKBest(mutual_info_classif, k=5).fit(X.fillna(0), y) print('Selected top 5 features: {}'.format(X.columns[k_best_features.get_support()])) features_selected = X.columns[k_best_features.get_support()] # #### Building model with features selected X = dataset[features_selected].to_numpy() y = y.to_numpy() # + from sklearn.model_selection import KFold kf = KFold(n_splits=10) for train_index, test_index in kf.split(X,y): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] # - # ### Building SVC with Linear Kernel from sklearn.svm import SVC classifier = SVC(kernel = 'linear', random_state = 0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred)) # + # ROC from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred, pos_label=1) auc = metrics.auc(fpr, tpr) print("Area under the Curve : ", auc) # Plotting AUC (Area Under the Curve) from sklearn.metrics import roc_auc_score, roc_curve ns_probs = [0 for _ in range(len(y_test))] lr_probs = y_pred ns_auc = roc_auc_score(y_test, ns_probs) lr_auc = roc_auc_score(y_test, lr_probs) print('No Skill: ROC AUC=%.3f' % (ns_auc)) print('Logistic: ROC AUC=%.3f' % (lr_auc)) # calculate roc curves ns_fpr, ns_tpr, _ = 
roc_curve(y_test, ns_probs) lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs) # plot the roc curve for the model plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend() # show the plot plt.show() # - # ### Building SVC with Poly Kernel from sklearn.svm import SVC classifier = SVC(kernel = 'poly', random_state = 0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred)) # + # ROC from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred, pos_label=1) auc = metrics.auc(fpr, tpr) print("Area under the Curve : ", auc) # Plotting AUC (Area Under the Curve) from sklearn.metrics import roc_auc_score, roc_curve ns_probs = [0 for _ in range(len(y_test))] lr_probs = y_pred ns_auc = roc_auc_score(y_test, ns_probs) lr_auc = roc_auc_score(y_test, lr_probs) print('No Skill: ROC AUC=%.3f' % (ns_auc)) print('Logistic: ROC AUC=%.3f' % (lr_auc)) # calculate roc curves ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs) lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs) # plot the roc curve for the model plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend() # show the plot plt.show() # - # ### Building SVC with rbf kernel # + from sklearn.model_selection import GridSearchCV param_grid = {'C': [0.1, 1, 10, 100, 1000], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001], 'kernel': ['rbf']} grid = GridSearchCV(SVC(), param_grid, refit = True, verbose = 3) # - grid.fit(X_train, y_train) print(grid.best_params_) print(grid.best_estimator_) grid_predictions = grid.predict(X_test) print(classification_report(y_test, grid_predictions)) # + # ROC from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred, pos_label=1) auc = metrics.auc(fpr, tpr) print("Area under the Curve : ", auc) # Plotting AUC (Area Under the Curve) from sklearn.metrics import roc_auc_score, roc_curve ns_probs = [0 for _ in range(len(y_test))] lr_probs = y_pred ns_auc = roc_auc_score(y_test, ns_probs) lr_auc = roc_auc_score(y_test, lr_probs) print('No Skill: ROC AUC=%.3f' % (ns_auc)) print('Logistic: ROC AUC=%.3f' % (lr_auc)) # calculate roc curves ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs) lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs) # plot the roc curve for the model plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend() # show the plot plt.show() # - # ### Forward Feature Selection X = dataset.drop(['Result_of_Treatment'], axis=1) y = dataset['Result_of_Treatment'] # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101) X_train.shape, X_test.shape # + def correlation(dataset, threshold): col_corr = set() # Set of all the names of correlated columns corr_matrix = dataset.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i, j]) > threshold: # we are interested in absolute coeff value colname = corr_matrix.columns[i] # getting the name of column col_corr.add(colname) 
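    # At this point col_corr holds every column whose absolute correlation with an
    # earlier column exceeds the threshold, so the caller can drop those columns.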
return col_corr corr_features = correlation(X_train, 0.8) print('correlated features: ', len(set(corr_features)) ) # + # removed correlated features X_train.drop(labels=corr_features, axis=1, inplace=True) X_test.drop(labels=corr_features, axis=1, inplace=True) X_train.shape, X_test.shape # - from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.svm import SVC sfs1 = SFS(SVC(kernel='linear'), k_features=10, forward=True, floating=False, verbose=2, scoring='roc_auc', cv=3) sfs1 = sfs1.fit(np.array(X_train.fillna(0)), y_train) # # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # BanFakeNews # ## Setup and import libraries # + # Automatically reloading imported modules # %load_ext autoreload # %autoreload 2 import sys sys.path.append('../..') # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from src.helpers import * pd.set_option('display.max_columns', None) # + # Change design of plots sns.set(style="whitegrid") # Change sizes and resolution of plots plt.rcParams['figure.figsize'] = (10, 6) # %config InlineBackend.figure_format='retina' plt.rcParams.update({'font.size': 15}) # Hide warnings import warnings warnings.filterwarnings('ignore') # - # ## Load the data # + df_labeled_authentic_7k = pd.read_csv('data/LabeledAuthentic-7K.csv') df_labeled_fake_1k = pd.read_csv('data/LabeledFake-1K.csv') df = pd.concat([df_labeled_authentic_7k, df_labeled_fake_1k], ignore_index=True) # - # ## General descriptive analysis # Let's check shape of the data - number of rows and attributes: df.shape # Overview of the data: df.head() # ### Datatypes # # **Note:** Be careful, attributes with only NaN values are considered as `float64` type by default. df.dtypes # ### Basic characteristics df.describe() df.describe(exclude=[np.number]) # ### One-value columns # # Which attributes contain only one value? one_value_attributes_analysis(df) # ### Missing values # # Analysis of missing values in attributes: missing_values_analysis(df) # ### Duplicates # # Are there any duplicates? df.duplicated().any() # ## Attributes analysis # # Analysis of all attributes: # + skip_attributes = [ 'articleID', 'domain', 'date', 'headline', 'content' # also skipping textual attributes, because of Bengali language (not supported) ] # attributes to skip in analysis (e.g. id) textual_attributes = [ ] # attributes with text values (e.g. 
content of article) textual_attributes = list(filter(lambda value: value not in skip_attributes, textual_attributes)) numerical_attributes = list(df.select_dtypes([np.number]).columns) numerical_attributes = list(filter(lambda value: value not in textual_attributes + skip_attributes, numerical_attributes)) categorical_attributes = list(df.select_dtypes(['object', 'category', 'bool']).columns) categorical_attributes = list(filter(lambda value: value not in textual_attributes + skip_attributes, categorical_attributes)) label_column = 'label' # attribute considered as "label" # - # ### Label attribute distribution df[label_column].value_counts().plot(kind='pie', title='Distribution of predicted classes'); df[label_column].value_counts().plot(kind='bar', title='Distribution of predicted classes'); # ### Numerical attributes # # Analysis of numerical attributes: analyse_numerical_attributes(df, label_column, numerical_attributes) # ### Categorical attributes # # Analysis of categorical attributes: analyse_categorical_attributes(df, label_column, categorical_attributes) # ### Textual attributes # # Some parts of analysis include preprocessing text. In this case, the following operations are performed: # * removing special characters (only letters are preserved), # * removing tokens shorter than 3 characters, # * removing tokens that are in english stop-words defined by NLTK library, # * removing accent marks from tokens. # # Analysis of textual attributes: analyse_textual_attributes(df, textual_attributes) # ## Pairwise analysis # # Pairwise analysis of attributes (numerical attributes): # ### Pair analysis if numerical_attributes and len(numerical_attributes) > 1: sns.pairplot(df, vars=numerical_attributes, hue=label_column); # ### Correlations # # Correlation matrix: if numerical_attributes and len(numerical_attributes) > 1: check_correlations(df, numerical_attributes) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Beautiful Soup: Build a Web Scraper With Python # # ## What Is Web Scraping? # # **Web scraping** - is the process of gathering information from the Internet. Even copying and pasting the lyrics of your favorite song is a form of web scraping! However, the words “web scraping” usually refer to a process that involves automation. Some websites don’t like it when automatic scrapers gather their data, while others don’t mind. # # If you’re scraping a page respectfully for educational purposes, then you’re unlikely to have any problems. Still, it’s a good idea to do some research on your own and make sure that you’re not violating any Terms of Service before you start a large-scale project. # # ## Challenges of Web Scraping # # The Web has grown organically out of many sources. It combines many different technologies, styles, and personalities, and it continues to grow to this day. In other words, the Web is a hot mess! Because of this, you’ll run into some challenges when scraping the Web: # # * **Variety**: Every website is different. While you’ll encounter general structures that repeat themselves, each website is unique and will need personal treatment if you want to extract the relevant information. # * **Durability**: Websites constantly change. Say you’ve built a shiny new web scraper that automatically cherry-picks what you want from your resource of interest. 
The 1st time you run your script, it works flawlessly. But when you run the same script only a short while later, you run into a discouraging and lengthy stack of tracebacks! # # Unstable scripts are a realistic scenario, as many websites are in active development. Once the site’s structure has changed, your scraper might not be able to navigate the sitemap correctly or find the relevant information. The good news is that many changes to websites are small and incremental, so you’ll likely be able to update your scraper with only minimal adjustments. # # However, keep in mind that because the Internet is dynamic, the scrapers you’ll build will probably require constant maintenance. You can set up continuous integration to run scraping tests periodically to ensure that your main script doesn’t break without your knowledge. # # ## An Alternative to Web Scraping: APIs # # Some website providers offer **application programming interfaces (APIs)** that allow you to access their data in a predefined manner. With APIs, you can avoid parsing HTML. Instead, you can access the data directly using formats like JSON and XML. HTML is primarily a way to present content to users visually. # # When you use an API, the process is generally more stable than gathering the data through web scraping. That’s because developers create APIs to be consumed by programs rather than by human eyes. # # The front-end presentation of a site might change often, but such a change in the website’s design doesn’t affect its API structure. The structure of an API is usually more permanent, which means it’s a more reliable source of the site’s data. # # However, APIs can change as well. The challenges of both variety and durability apply to APIs just as they do to websites. Additionally, it’s much harder to inspect the structure of an API by yourself if the provided documentation lacks quality. # # ## Scrape the Fake Python Job Site # # In this tutorial, you’ll build a web scraper that fetches Python software developer job listings from the Fake Python Jobs site. It’s an example site with fake job postings that you can freely scrape to train your skills. Your web scraper will parse the HTML on the site to pick out the relevant information and filter that content for specific words. # # ## Step 1: Inspect Your Data Source # # Before you write any Python code, you need to get to know the website that you want to scrape. You’ll need to understand the site structure to extract the information that’s relevant for you. Start by opening the site you want to scrape with your favorite browser. # # ## Decipher the Information in URLs # # A programmer can encode a lot of information in a URL. Your web scraping journey will be much easier if you 1st become familiar with how URLs work and what they’re made of. For example, you might find yourself on a details page that has the following URL: # # `https://realpython.github.io/fake-jobs/jobs/senior-python-developer-0.html` # # You can deconstruct the above URL into two main parts: # # The **base URL** - represents the path to the search functionality of the website. In the example above, the base URL is `https://realpython.github.io/fake-jobs/`. The specific site location that ends with `.html` is the path to the job description’s unique resource. Any job posted on this website will use the same base URL. However, the unique resources’ location will be different depending on what specific job posting you’re viewing. 
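#
# As a quick aside (a minimal sketch, not part of the original tutorial), Python’s standard `urllib.parse` module can split a details-page URL like the one above into its components:

# +
from urllib.parse import urlparse

# Example details-page URL from the section above
job_url = "https://realpython.github.io/fake-jobs/jobs/senior-python-developer-0.html"

parts = urlparse(job_url)
print(parts.scheme)  # https
print(parts.netloc)  # realpython.github.io
print(parts.path)    # /fake-jobs/jobs/senior-python-developer-0.html
# -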
# # URLs can hold more information than just the location of a file, some websites use **query parameters** to encode values that you submit when performing a search. You can think of them as query strings that you send to the database to retrieve specific records. # # You’ll find query parameters at the end of a URL. For example, if you go to Indeed and search for “software developer” in “Australia” through their search bar, you’ll see that the URL changes to include these values as query parameters: # # `https://au.indeed.com/jobs?q=software+developer&l=Australia` # # The query parameters in this URL are `?q=software+developer&l=Australia`. Query parameters consist of 3 parts: # # 1. **Start**: The beginning of the query parameters is denoted by a question mark (`?`). # 2. **Information**: The pieces of information constituting 1 query parameter are encoded in key-value pairs, where related keys and values are joined together by an equals sign (`key=value`). # 3. **Separator**: Every URL can have multiple query parameters, separated by an ampersand symbol (`&`). # # Equipped with this information, you can pick apart the URL’s query parameters into two key-value pairs: # # * `q=software+developer` - selects the type of job # * `l=Australia` - selects the location of the job # # ## Inspect the Site Using Developer Tools # # You’ll need to understand the page structure to pick what you want from the HTML response that you’ll collect in one of the upcoming steps. # # **Developer tools** can help you understand the structure of a website. In Chrome on macOS, you can open up the developer tools through the menu by selecting **View → Developer → Developer Tools**. On Windows and Linux, you can access them by clicking the top-right menu button (`⋮`) and selecting **More Tools → Developer Tools**. You can also access your developer tools by right-clicking on the page and selecting the Inspect option or using a keyboard shortcut: # # * Mac: `Cmd+Alt+I` # * Windows/Linux: `Ctrl+Shift+I` # # **Developer tools** - allow you to interactively explore the site’s **document object model** (**DOM**) to better understand your source. # # ## Step 2: Scrape HTML Content From a Page # # First, you’ll want to get the site’s HTML code into your Python script so that you can interact with it. For this task, you’ll use Python’s `requests` library: # + import requests URL = "https://realpython.github.io/fake-jobs/" page = requests.get(URL) print(page.text[0:350], '\n...') # - # This code retrieves the HTML data that the server sends back and stores that data in a Python object. # # ## Static Websites # # The website that you’re scraping in this tutorial serves **static HTML content**. The server that hosts the site sends back HTML documents that **already contain all the data that you’ll get to see as a user**. # # The HTML of this job board has descriptive **class names** on the elements that you’re interested in: # # * `class="title is-5"` - contains the title of the job posting # * `class="subtitle is-6 company"` - contains the name of the company that offers the position # * `class="location"` - contains the location where you’d be working # # However, there are more challenging situations that you might encounter when you’re scraping websites. Before you learn how to pick the relevant information from the HTML that you just scraped, you’ll take a quick look at 2 of these more challenging situations. # # ## Hidden Websites # # Some pages contain information that’s hidden behind a login. 
That means you’ll need an account to be able to scrape anything from the page. The process to make an HTTP request from your Python script is different from how you access a page from your browser. Just because you can log in to the page through your browser doesn’t mean you’ll be able to scrape it with your Python script. # # The `requests` library comes with the built-in capacity to handle authentication. With these techniques, you can log in to websites when making the HTTP request from your Python script and then scrape information that’s hidden behind a login. # # ## Dynamic Websites # # **Static sites** - are straightforward to work with because the server sends you an HTML page that already contains all the page information in the response. You can parse that HTML response and immediately begin to pick out the relevant data. # # On the other hand, with a **dynamic website**, the server might not send back any HTML at all. Instead, you could receive **JavaScript code as a response**. Many modern web applications are designed to provide their functionality in collaboration with the clients’ browsers. Instead of sending HTML pages, these apps send JavaScript code that instructs your browser to create the desired HTML. Web apps deliver dynamic content in this way to offload work from the server to the clients’ machines as well as to avoid page reloads and improve the overall user experience. # # What happens in the browser is not the same as what happens in your script. Your browser will diligently execute the JavaScript code it receives from a server and create the DOM and HTML for you locally. However, if you request a dynamic website in your Python script, then you won’t get the HTML page content. # # When you use `requests`, you only receive what the server sends back. In the case of a dynamic website, you’ll end up with some JavaScript code instead of HTML. **The only way to go from the JavaScript code you received to the content that you’re interested in is to execute the code**, just like your browser does. The `requests` library can’t do that for you, but there are other solutions that can. # # For example, `requests-html` is a project created by the author of the `requests` library that **allows you to render JavaScript** using syntax that’s similar to the syntax in `requests`. It also includes capabilities for parsing the data by using `Beautiful Soup` under the hood. # # **NOTE:** Another popular choice for scraping dynamic content is `Selenium`. You can think of `Selenium` as a slimmed-down browser that executes the JavaScript code for you before passing on the rendered HTML response to your script. # # ## Step 3: Parse HTML Code With Beautiful Soup # # You’ve successfully scraped some HTML from the Internet, but when you look at it, it just seems like a huge mess. There are tons of HTML elements here and there, thousands of attributes scattered around—and wasn’t there some JavaScript mixed in as well? It’s time to parse this lengthy code response with the help of Python to make it more accessible and pick out the data you want. # # `Beautiful Soup` - is a Python library for parsing structured data. It allows you to interact with HTML in a similar way to how you interact with a web page using developer tools. The library exposes a couple of intuitive functions you can use to explore the HTML you received. 
To get started, use your terminal to install Beautiful Soup: # + from bs4 import BeautifulSoup import requests URL = "https://realpython.github.io/fake-jobs/" page = requests.get(URL) print('type:', type(page.content), '\n-------\n', page.content[:350], '\n...', sep='') soup = BeautifulSoup(page.content, "html.parser") print('-------\ntype:', type(soup)) # - # `BeautifulSoup` object takes `page.content`, which is the HTML content you scraped earlier, as its input. # # **NOTE:** You’ll want to pass `page.content` instead of `page.text` to avoid problems with character encoding. The `.content` attribute holds raw bytes, which can be decoded better than the text representation you printed earlier using the `.text` attribute. # # The 2nd argument, `"html.parser"`, makes sure that you use the appropriate parser for HTML content. # # ## Find Elements by ID # # In an HTML web page, every element can have an **id attribute** assigned. As the name already suggests, that **id attribute** - makes the element uniquely identifiable on the page. You can begin to parse your page by selecting a specific element by its ID. # # The element we’re looking for is a `
<div>` with an id attribute that has the value `"ResultsContainer"`. It has some other attributes as well, but below is the gist of what you’re looking for: # # `<div id="ResultsContainer">`
# `<!-- all the job listings -->`
# `</div>` # # Beautiful Soup allows you to find that specific HTML element by its ID: # + results = soup.find(id="ResultsContainer") # For easier viewing, you can prettify any Beautiful Soup object # when you print it out. If you call `.prettify()` on the results variable # that you just assigned above, then you’ll see all the HTML contained within the `<div>`
: print(results.prettify()[:350], '\n...') # When you use the element’s ID, you can pick out 1 element # From among the rest of the HTML. Now you can work with only this # Specific part of the page’s HTML. It looks like the soup just # Got a little thinner! However, it’s still quite dense. # - # ## Find Elements by HTML Class Name # # You’ve seen that every job posting is wrapped in a `
<div>` element with the class `card-content`. Now you can work with your new object called results and select only the job postings in it: # + # `find_all()` returns all the tags and strings that match your filters. # This can take a while if the document is large. # `.find_all()` - returns an iterable containing all the HTML # for all the job listings displayed on that page. job_elements = results.find_all("div", class_="card-content") for job_element in job_elements[:2]: print(job_element, end="\n"*2) print('--------------------------------------') # There’s still a lot of HTML! You saw earlier that your page has descriptive class names # on some elements. You can pick out those child elements # from each job posting with .find(): for job_element in job_elements[:2]: title_element = job_element.find("h2", class_="title") company_element = job_element.find("h3", class_="company") location_element = job_element.find("p", class_="location") print(title_element) print(company_element) print(location_element) print() # - # Each `job_element` is another `BeautifulSoup()` object. Therefore, you can use the same methods on it as you did on its parent element, results. # # With this code snippet, you’re getting closer and closer to the data that you’re actually interested in. Still, there’s a lot going on with all those HTML tags and attributes floating around: # # `
<h2 class="title is-5">Senior Python Developer</h2>`
# `<h3 class="subtitle is-6 company">Payne, Roberts and Davis</h3>`
# `<p class="location">Stewartbury, AA</p>
` # # ## Extract Text From HTML Elements # # You only want to see the title, company, and location of each job posting. And behold! Beautiful Soup has got you covered. You can add `.text` to a Beautiful Soup object to return only the text content of the HTML elements that the object contains: # + for job_element in job_elements[:2]: title_element = job_element.find("h2", class_="title") company_element = job_element.find("h3", class_="company") location_element = job_element.find("p", class_="location") print(title_element.text) print(company_element.text) print(location_element.text) print() print('--------------------') # Run the above code snippet, and you’ll see the text of each element displayed. # However, it’s possible that you’ll also get some extra whitespace. # Since you’re now working with Python strings, you can `.strip()` # The superfluous whitespace. You can also apply any other familiar Python string methods # To further clean up your text: for job_element in job_elements[:2]: title_element = job_element.find("h2", class_="title") company_element = job_element.find("h3", class_="company") location_element = job_element.find("p", class_="location") print(title_element.text.strip()) print(company_element.text.strip()) print(location_element.text.strip()) print() # - # ## Find Elements by Class Name and Text Content # # Not all of the job listings are developer jobs. Instead of printing out all the jobs listed on the website, you’ll first filter them using keywords. You know that job titles in the page are kept within `
<h2>
` elements. To filter for only specific jobs, you can use the string argument: # + # This code finds all `
<h2>
` elements where the contained string matches "Python" exactly. # NOTE: that you’re directly calling the method on your 1st results variable. # If you go ahead and `print()` the output of the above code snippet to your console, # Then you might be disappointed because it’ll be empty: python_jobs = results.find_all("h2", string="Python") print(python_jobs) # - # There was a Python job in the search results, so why is it not showing up? # # When you use `string=` as you did above, your program looks for that string exactly. Any differences in the spelling, capitalization, or whitespace will prevent the element from matching. In the next section, you’ll find a way to make your search string more general. # # ## Pass a Function to a Beautiful Soup Method # # In addition to strings, you can sometimes pass functions as arguments to `BeautifulSoup` methods. You can change the previous line of code to use a function instead: # + # Now you’re passing an anonymous function to the `string=` argument. # The `lambda` function looks at the text of each `
<h2>
` element, # Converts it to lowercase, and checks whether the substring "python" is found anywhere. # You can check whether you managed to identify all the Python jobs with this approach: python_jobs = results.find_all( "h2", string=lambda text: "python" in text.lower() ) # Your program has found 10 matching job posts # That include the word "python" in their job title! print(len(python_jobs)) # + [markdown] tags=[] # Finding elements depending on their text content is a powerful way to filter your HTML response for specific information. `BeautifulSoup` allows you to use either exact strings or functions as arguments for filtering text in `BeautifulSoup` objects. # # However, when you try to run your scraper to print out the information of the filtered Python jobs, you’ll run into an error: # # `AttributeError: 'NoneType' object has no attribute 'text'` # # This message is a common error that you’ll run into a lot when you’re scraping information from the Internet. Inspect the HTML of an element in your `python_jobs` list. What does it look like? Where do you think the error is coming from? # # ## Identify Error Conditions # # When you look at a single element in `python_jobs`, you’ll see that it consists of only the `
<h2>
` element that contains the job title. # # When you revisit the code you used to select the items, you’ll see that that’s what you targeted. You filtered for only the `
<h2>
` title elements of the job postings that contain the word "`python`". As you can see, these elements don’t include the rest of the information about the job. # # You tried to find the job title, the company name, and the job’s location in each element in `python_jobs`, but each element contains only the job title text. # # Your diligent parsing library still looks for the other ones, too, and returns `None` because it can’t find them. Then, `print()` fails with the shown error message when you try to extract the `.text` attribute from one of these `None` objects. # # The text you’re looking for is nested in sibling elements of the `
<h2>
` elements your filter returned. `BeautifulSoup` can help you to select sibling, child, and parent elements of each `BeautifulSoup` object. # - # ## Access Parent Elements # # One way to get access to all the information you need is to step up in the hierarchy of the DOM starting from the `
<h2>
` elements that you identified. # # The `<div>
` element with the card-content class contains all the information you want. It’s a third-level parent of the `
<h2>
` title element that you found using your filter. # # With this information in mind, you can now use the elements in `python_jobs` and fetch their great-grandparent elements instead to get access to all the information you want: # + python_jobs = results.find_all( "h2", string=lambda text: "python" in text.lower() ) python_job_elements = [ h2_element.parent.parent.parent for h2_element in python_jobs ] print(python_job_elements[0]) # - # You added a list comprehension that operates on each of the `
<h2>
` title elements in `python_jobs` that you got by filtering with the `lambda` expression. You’re selecting the parent element of the parent element of the parent element of each `
<h2>
` title element. That’s 3 generations up! # # When you were looking at the HTML of a single job posting, you identified that this specific parent element with the class name card-content contains all the information you need. # # Now you can adapt the code in your for loop to iterate over the parent elements instead: for job_element in python_job_elements: # -- snip -- # When you run your script another time, you’ll see that your code once again has access to all the relevant information. That’s because you’re now looping over the `
<div>` elements instead of just the `
<h2>
` title elements. # # Using the `.parent` attribute that each `BeautifulSoup` object comes with gives you an intuitive way of stepping through your DOM structure and addressing the elements you need. You can also access child elements and sibling elements in a similar manner. # # ## Extract Attributes From HTML Elements # # At this point, your Python script already scrapes the site and filters its HTML for relevant job postings. Well done! However, what’s still missing is the link to apply for a job. # # While you were inspecting the page, you found 2 links at the bottom of each card. If you handle the link elements in the same way as you handled the other elements, you won’t get the URLs that you’re interested in: for job_element in python_job_elements[:2]: # -- snip -- links = job_element.find_all("a") for link in links: print(link.text.strip()) # If you run this code snippet, then you’ll get the **link texts** Learn and Apply instead of the associated URLs. # # That’s because the `.text` attribute leaves only the visible content of an HTML element. It strips away all HTML tags, including the HTML attributes containing the URL, and leaves you with just the link text. To get the URL instead, you need to extract the value of one of the HTML attributes instead of discarding it. # # The URL of a link element is associated with the `href` attribute. The specific URL that you’re looking for is the value of the `href` attribute of the second `` tag at the bottom the HTML of a single job posting: # # Start by fetching all the `` elements in a job card. Then, extract the value of their `href` attributes using square-bracket notation: for job_element in python_job_elements[:2]: # -- snip -- links = job_element.find_all("a") for link in links: link_url = link["href"] print(f"Apply here: {link_url}\n") # In this code snippet, you: # # 1. First fetched all links from each of the filtered job postings # 2. Then you extracted the href attribute, which contains the URL, using `["href"]` # 3. 
Printed it to your console # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Importing libraries and reading the dataset # + import pandas as pd import numpy as np from sklearn import preprocessing from IPython.display import display pd.options.display.max_columns = None # - df = pd.read_csv('dataset/Features Dataset.csv') df.head() # + df.index = list(df.ID) drop_list = ['Match', 'ID', 'Result'] df.drop(drop_list, axis=1, inplace=True) # - df.head() # ### Transforming and removing attributes df["Date"] = pd.to_datetime(df["Date"] ) df.head() # + column_date = df['Date'] df_date = pd.DataFrame({"year": column_date.dt.year, "month": column_date.dt.month, "day": column_date.dt.day, "hour": column_date.dt.hour, "dayofyear": column_date.dt.dayofyear, "week": column_date.dt.week, "weekofyear": column_date.dt.weekofyear, "dayofweek": column_date.dt.dayofweek, "weekday": column_date.dt.weekday, "quarter": column_date.dt.quarter, }) # - df_date.head() # ### Filling NaN values def health_transform(v): if v == 'H': return 4 if v == 'S': return 3 if v == 'A': return 2 if v == 'I': return 1 return 4 def prev_match_transform(v): if v == 'W': return 2 if v == 'L': return 1 return 0 def rank_transform(v): v = v.replace('-', ' ') split = v.split(' ') wins = float(split[0]) loses = float(split[1]) loses = loses if loses > 0 else 0.1 rank = wins / loses return rank df['Sex'] = df['Sex'].map(lambda s: 1 if s == 'Male' else 0) df['Health'] = df['Health'].apply(health_transform) df['Prev. Match'] = df['Prev. Match'].apply(prev_match_transform) df['Rank_T'] = 0 for index, row in df.iterrows(): if row['Sport'] != 'Tennis': df.loc[(df.index==index), 'Rank_T'] = rank_transform(row['Rank']) else: df.loc[(df.index==index), 'Rank_T'] = float(row['Rank']) df['Rank'] = df['Rank_T'] df['Rank_T'] = 0 for index, row in df.iterrows(): if row['Sport'] != 'Tennis': df.loc[(df.index==index), 'Rank_T'] = rank_transform(row['Rank Opp.']) else: df.loc[(df.index==index), 'Rank_T'] = float(row['Rank Opp.']) df['Rank Opp.'] = df['Rank_T'] def sport_transform(v): if v == 'Box': return 1 if v == 'MMA': return 2 if v == 'Tennis': return 3 return None df['Sport'] = df['Sport'].apply(sport_transform) df.drop('Rank_T', axis=1, inplace=True) def label_transform(v): if v == 'YES': return 1 if v == 'NO': return 0 return None df['Final Result'] = df['Final Result'].apply(label_transform) interviewee = set(df['Interviewee'].unique()) opponent = set(df['Opponent'].unique()) players = interviewee.union(opponent) players d = dict.fromkeys(players, 0) counter = 0 for k, v in d.items(): d[k] = counter counter += 1 def set_id(v): return d[v] df['Interviewee'] = df['Interviewee'].apply(set_id) df['Opponent'] = df['Opponent'].apply(set_id) df.drop('Date', axis=1, inplace=True) df = df.join(df_date) df['label'] = 0 df['label'] = df['Final Result'] df.drop('Final Result', axis=1, inplace=True) display(df) # ### Save Dataframe as csv file # + # file_name = 'Dataset/pool_matches_numerical_classified.csv' # df.to_csv(file_name, sep=',', encoding='utf-8', header=True, index=True) # - # ### Normalization x = df.values min_max_scaler = preprocessing.MinMaxScaler() x_scaled = min_max_scaler.fit_transform(x) df_norm = pd.DataFrame(x_scaled, index=df.index, columns=df.columns) df_norm.head() # ### Save normalized Dataframe as csv file file_name = 'dataset/features_norm.csv' 
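# Write the normalized dataframe to disk; index=True keeps the ID-based row index that was set earlier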
df_norm.to_csv(file_name, sep=',', encoding='utf-8', header=True, index=True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="Wm6OkgVsbafJ" # Install captum package # !pip install captum # + id="-J9A0V63qLeW" import pandas as pd import numpy as np import random from utils import preprocessing, SlidingWindow, NegativeSampling, utils, modelhdfs, explainhdfs from sklearn.model_selection import train_test_split import torch import torch.nn as nn import torch.optim as optim from tqdm import tqdm import time import math import os from sklearn import metrics from sklearn.metrics import precision_recall_fscore_support from captum.attr import LayerIntegratedGradients import collections # + id="v7vbfwlP8IfJ" DATASET_NAME = 'HDFS' TRAIN_SIZE = 100000 RATIO = 0.1 SEED = 42 # + id="yerXXxqJqetY" # Download dataset and parsing the dataset with Drain preprocessing.parsing(DATASET_NAME) # + id="ERsjmU1r5Zo9" # Cut log data into sliding windows # Split data into training normal dataset, test normal dataset, and test abnormal dataset # Get the bigram from training normal dataset # Train a Word2Vec model with the training normal data # Number of keys include 'pad' random.seed(SEED) np.random.seed(SEED) train_normal, test_normal, test_abnormal, bigram, unique, weights, train_dict, w2v_dic = SlidingWindow.sliding(DATASET_NAME, train_size=TRAIN_SIZE) # + id="CSsRoGViaEpG" # # +1 for unknown VOCAB_DIM = len(train_dict)+1 OUTPUT_DIM = 2 EMB_DIM = 4 HID_DIM = 64 N_LAYERS = 1 DROPOUT = 0.0 BATCH_SIZE = 32 TIMES = 20 # + id="L8HIfgwvblO5" # Get negative samples and split into training data and val data random.seed(SEED) np.random.seed(SEED) neg_samples = NegativeSampling.negative_sampling_hdfs(train_normal, bigram, unique, TIMES, VOCAB_DIM) # + id="zUEk54oerMa5" df_neg = utils.get_dataframe(neg_samples, 1, w2v_dic) df_pos = utils.get_dataframe(list(train_normal['EventSequence']), 0, w2v_dic) df_pos.columns = df_pos.columns.astype(str) df_train = pd.concat([df_pos, df_neg], ignore_index = True, axis=0) df_train.reset_index(drop = True) y = list(df_train.loc[:,'class_label']) X = list(df_train['W2V_EventId']) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) train_iter = utils.get_iter_hdfs(X_train, y_train, w2v_dic, train_dict, BATCH_SIZE) val_iter = utils.get_iter_hdfs(X_val, y_val, w2v_dic, train_dict, BATCH_SIZE) # + id="QNSIPEQ3vqwx" random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True device = torch.device( "cuda" if torch.cuda.is_available() else"cpu") interpretableSAD = modelhdfs.C_lstm(weights, VOCAB_DIM, OUTPUT_DIM, EMB_DIM, HID_DIM, N_LAYERS, DROPOUT, device, BATCH_SIZE).to(device) print(f'The model has {modelhdfs.count_parameters(interpretableSAD):,} trainable parameters') print() optimizer = optim.Adam(interpretableSAD.parameters()) criterion = nn.CrossEntropyLoss() try: os.makedirs('Model') except: pass #Training interpretableSAD N_EPOCHS = 1 CLIP = 1 best_test_loss = float('inf') for epoch in tqdm(range(N_EPOCHS)): start_time = time.time() train_loss= modelhdfs.train(interpretableSAD, train_iter, optimizer, criterion, CLIP, epoch, device) val_loss = modelhdfs.evaluate(interpretableSAD, val_iter, criterion, device) end_time = time.time() epoch_mins, epoch_secs = modelhdfs.epoch_time(start_time, end_time) if val_loss < 
best_test_loss: best_test_loss = val_loss torch.save(interpretableSAD.state_dict(), 'Model/interpretableSAD_HDFS.pt') print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}') print(f'\t Val. Loss: {val_loss:.3f} | Val. PPL: {math.exp(val_loss):7.3f}') # + id="Lo2CgwMCvu6R" test_ab_X = test_abnormal['W2V_EventId'] test_n_X = test_normal['W2V_EventId'] y, y_pre = modelhdfs.model_precision(interpretableSAD, device, w2v_dic, train_dict, test_n_X.values.tolist(), test_ab_X.values.tolist()) f1_acc = metrics.classification_report(y, y_pre, digits=5) print(f1_acc) # + id="Tu0LfXrvcBub" lig = LayerIntegratedGradients(interpretableSAD, interpretableSAD.embedding) lst_train_keys = [] for i in train_normal.W2V_EventId.values: lst_train_keys.extend(i) dic_app = collections.Counter(lst_train_keys) if w2v_dic[str(len(train_dict))] not in dic_app.keys(): dic_app[w2v_dic[str(len(train_dict))]] = 0 start = [w2v_dic[i] for i in unique] df_attr = explainhdfs.get_dataset(interpretableSAD, device, lig, test_abnormal, dic_app, start) # + id="eoEGTpBZd09H" # %%capture cap for i in range(len(df_attr)): if len(df_attr['Event'].iloc[i]) >10 and len(df_attr['Event'].iloc[i]) < 30: display(explainhdfs.visualize_token_attrs(df_attr['Event'].iloc[i], np.array(df_attr['Attr'].iloc[i]), df_attr['Blk'].iloc[i])) # + id="PojhVWv87sFN" cap() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Loading the optional definition attribute # # 1) Load the ontology file while also loading the optional definition attribute ("def" or "defn") for each GO term # + from os.path import exists from goatools.obo_parser import GODag if not exists('go-basic.obo'): # !wget http://geneontology.org/ontology/go-basic.obo dag = GODag('go-basic.obo', optional_attrs=['def']) # - # ## Get one GOTerm object # # The DAG is a dictionary where: # * a key is an ID field (Example: GO:NNNNNNN) # * a value is a GOTerm object term = next(iter(dag.values())) # ## The optional 'def' obo field becomes a 'defn' data member in a GOTerm object # Because "def" is a keyword in Python, the optional obo attribute "def" gets renamed as "defn", meaning "definition" print(dir(term)) # ## Print one GO term and its optional definition (defn) data member value pattern = '{ID} {NAME}\n{DEF}' print(pattern.format(ID=term.item_id, NAME=term.name, DEF=term.defn)) # ## Print the first term in biological process term1 = dag['GO:0000001'] print(pattern.format(ID=term1.item_id, NAME=term1.name, DEF=term1.defn)) # Copyright (C) 2021-present, and . All rights reserved. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Imports import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings("ignore") from wrangle import wrangle_telco from prepare import scale_data # - # # explore_telco Exercises # * Our scenario continues: # # * As a customer analyst, I want to know who has spent the most money with us over their lifetime. 
I have monthly charges and tenure, so I think I will be able to use those two attributes as features to estimate total_charges. I need to do this within an average of $5.00 per customer. # - Do your work in a notebook named explore_telco. In addition, you should create a file named explore.py that contains the following functions for exploring your variables (features & target) train, validate, test = wrangle_telco(split=True) train.head() # #### 1. Write a function named plot_variable_pairs that accepts a dataframe as input and plots all of the pairwise relationships along with the regression line for each pair. # # sns.jointplot('tenure', 'monthly_charges', data=train, kind='reg', height=5); plt.show() sns.jointplot('tenure', 'total_charges', data=train, kind='reg', height=5); plt.show() # #### 2. Write a function named months_to_years that accepts your telco churn dataframe and returns a dataframe with a new feature tenure_years, in complete years as a customer. # # train['tenure_years'] = train.tenure / 12 train.tenure_years # #### 3. Write a function named plot_categorical_and_continuous_vars that accepts your dataframe and the name of the columns that hold the continuous and categorical features and outputs 3 different plots for visualizing a categorical variable and a continuous variable. # # cat_vars = [] quant_vars = ['tenure','total_charges','monthly_charges','tenure_years'] g = sns.PairGrid(train) g.map_diag(plt.hist) g.map_offdiag(plt.scatter) # #### 4. Save the functions you have written to create visualizations in a file named explore.py. Rewrite your notebook code so that you are using the functions imported from this file. # # # #### 5. Explore your dataset with any other visualizations you think will be helpful. # # sns.pairplot(train) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf_gpu # language: python # name: tf_gpu # --- # + [markdown] id="x1MC8MkgV2gW" colab_type="text" # # + [markdown] id="4suMUv4UKzGU" colab_type="text" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/ym001/Manteia/blob/master/notebook/notebook_Manteia_presentation1.ipynb) # # Run this nootbook with GPU : ->Modifier->Parametre du nootbook->accélérateur matériel->GPU # + id="nXesee8RWngy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 650} outputId="8d6a2f39-4da1-4ee2-ed3c-f19b9f6f240f" pip install manteia # + id="IGPczm5WV1Gg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["a43f9142011e4945be937ee2fd36ccde", "f3314f21b49c46f8b31fd7dcdd55620d", "fb81f8e3f4f6449496d0c4a57658b9d0", "3c750a8d41284f08a58651702e0b1bb0", "0bb4b523537e47d38f702b44b02aa7b9", "e79c4b6ef3a5421caccb426f28a0d845", "", "b839b524c3e54d63bf4e03de5d602d8f", "65a4ca3ae382440eb8a74d094481d73e", "", "", "10186a169be14bd1aa795cd6c4e599ea", "", "9de83207b5e549658da574c99b4fed87", "", "", "4d34aba3447a4caab5805f94e527ee09", "", "e762a1d6005240a7b834a2a71a9ddb05", "", "", "2af2194037a84ed4bfe1b8bc0061b52f", "219b7dd5ca7640629f03ee4acd12b6b5", "b91caa64320b4e2182b6540926a844f2", "", "", "", "5ba7ff1b8c3b4508972e87da96553e45", "", "", "157612982b594aeab8757006dae2df30", "a89cec822a12427ebb0637bd9896faf6"]} outputId="798221fb-075d-449e-eb8a-39961dce7ec7" from Manteia.Classification import Classification from Manteia.Model import Model documents = ['What should you do before criticizing 
Pac-Man? WAKA WAKA WAKA mile in his shoe.','What did say at the abortion clinic? Hasta last vista, baby.'] labels = ['funny','not funny'] model = Model(model_name ='roberta') cl=Classification(model,documents,labels,process_classif=True) # + id="5ekaykpJV1Gl" colab_type="code" colab={} # + id="daUZAFFeV1Gp" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # While Loop and Numbers # --- # # We can manipulate variables to represent a range of numbers by repetitively applying arithmetic operators. # # ### Recall: Assignment Operators # # ```python # a = b # (Assignment) # a += b # (Add & Assign) # a -= b # (Subtract & Assign) # a *= b # (Multiply & Assign) # a /= b # (Divide & Assign) # a //= b # (Floor Divide & Assign) # a **= b # (Exponentiate & Assign) # a %= b # (Modulo & Assign) # ``` # # These operators will manipulate/update the left variable with the result of the arithmetic operation with the right operand. # # If we can use these inside a while loop code block, we can repetitively change the variable to make a condition become False to end the while loop. # + # Counting from 1 to 10 num = 1 while num <= 10: print(num) num += 1 # end of while # + # Counting down from 10 to 1: num = 10 while num > 0: print(num) num -= 1 # end of while # + # Collatz Sequence # The conjecture of the collatz sequence is that any number following the set of rules will always end up at 1 # Rule: # If N is even, divide it by 2 # If N is odd, multiply by 3 then add 1 num = 13 # a random starting value while num != 1: print(num) if num % 2 == 0: num //= 2 else: num *= 3 num += 1 # end of while print(num) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="xpzAIXE0-1MJ" # # Decision Trees # A01038061 # + [markdown] id="7C3E4zYxDkE9" # ## Configs # + id="H6e0AXZA-0vX" #Python >= 3.5 required import sys assert sys.version_info >= (3,5) #Scikit-Learn >= 0.20 is required import sklearn assert sklearn.__version__ >="0.20" #imports import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from graphviz import Source from sklearn.tree import export_graphviz from sklearn.tree import DecisionTreeClassifier from matplotlib.colors import ListedColormap from sklearn.linear_model import LogisticRegression import os # + id="VZRugjAT_cRz" #Configs dirs for images mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) PROJECT_ROOT_DIR = "." CHAPTER_ID = "decision_trees" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID,) os.makedirs(IMAGES_PATH, exist_ok=True) # + id="QpXLLrUHAvId" #save fig functions on the directory def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." 
+ fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # + id="3jSmz6h1u7Zg" #Batch Gradient Descent Algorithm def sigmoid(x): return 1.0 / (1 + np.exp(-x)) def lr_hypothesis(x, theta): return np.dot(x,theta) #return hb_opt => def batchGradientDescent(X, y, b0 = 0.5, ALPHA = 0.25, max_it=5000, threshold = 1 * pow(10,-4)): #prepare data X = X.values y = y.values zm, zn = X.shape z = np.ones(zm) z = z.reshape(zm, 1) X = np.append(z,X,axis=1) m, n = X.shape theta = np.zeros(n) * b0 theta = theta.reshape(n,1) y = y.reshape(-1,1) diff = 1 j = 0 while j < max_it and diff > threshold: last_t = theta infunc1 = sigmoid(lr_hypothesis(X, theta)) - y gradient = np.dot(X.T, infunc1) / m theta = theta - (ALPHA / m) * gradient diff = np.linalg.norm(last_t-theta) j+=1 return theta, j # + id="AwTBdy2zvA9P" #Testing functions #return if classify in 1 or 0. def classify(x): return int(x > 0.5) #compare data def compare(y_hat, y): return np.where(y_hat == y, 0, 1) #return error def error(y_hat, y, T): return 1 / T * sum(compare(y_hat, y)) #Apply model with values to predict probability of 1. def predict(model, X): X = X.values X = np.insert(X, 0, 1.0) return sigmoid(np.dot(model.T, X)) # + [markdown] id="MAH53HTQD97d" # ## Iris dataset # # + id="JkW9PiY_Byev" #import dataset and transform to csv from sklearn.datasets import load_iris iris = load_iris() X = iris.data[:, 2:] # work only with petal length and width y = iris.target # + id="fiZRiKPx-9ui" #Split data for traning and testing X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) # + colab={"base_uri": "https://localhost:8080/", "height": 441} id="mhwIPoUeFTow" outputId="e8c52840-e108-4c5f-a45a-17b208fe62a4" #Decision tree fit with the training data tree_clf = DecisionTreeClassifier(max_depth = 2, random_state=0) tree_clf = tree_clf.fit(X_train, y_train) #Create tree view export_graphviz( tree_clf, out_file = os.path.join(IMAGES_PATH, "iris_tree.dot"), feature_names = iris.feature_names[2:], class_names = iris.target_names, rounded=True, filled=True ) Source.from_file(os.path.join(IMAGES_PATH, "iris_tree.dot")) # + colab={"base_uri": "https://localhost:8080/"} id="EsXIMmp7A6KQ" outputId="8eab3446-103b-46fb-c6b4-2d04472d1879" #Calculate Accuracy: correct predictions / total number of data points score = tree_clf.score(X_test, y_test) print("Score of iris dataset with Decision tree model: ", score) # + colab={"base_uri": "https://localhost:8080/", "height": 314} id="eRzn3cwai54t" outputId="d4418d03-e796-4256-f3d2-9c97230fc0ba" #Visualization by each split: need to work only with binary classifications def plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True): x1s = np.linspace(axes[0], axes[1], 100) x2s = np.linspace(axes[2], axes[3], 100) x1, x2 = np.meshgrid(x1s, x2s) X_new = np.c_[x1.ravel(), x2.ravel()] y_pred = clf.predict(X_new).reshape(x1.shape) custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0']) plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap) if not iris: custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50']) plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8) if plot_training: plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris setosa") plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris versicolor") plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris virginica") plt.axis(axes) if iris: plt.xlabel("Petal length", fontsize=14) 
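# axis labels for the iris feature space the tree was fit on: petal length on x, petal width on y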
plt.ylabel("Petal width", fontsize=14) else: plt.xlabel(r"$x_1$", fontsize=18) plt.ylabel(r"$x_2$", fontsize=18, rotation=0) if legend: plt.legend(loc="lower right", fontsize=14) plt.figure(figsize=(8, 4)) plot_decision_boundary(tree_clf, X_test, y_test) plt.plot([2.45, 2.45], [0, 3], "k-", linewidth=2) plt.plot([2.45, 7.5], [1.75, 1.75], "k--", linewidth=2) plt.text(1.40, 1.0, "Depth=0", fontsize=15) plt.text(3.2, 1.80, "Depth=1", fontsize=13) save_fig("decision_tree_decision_boundaries_plot") plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="uyckqE0GyUf7" outputId="d032893e-bc81-4851-9e04-e9b4e0064d21" print("Iris Figs generated in: ", IMAGES_PATH) # + [markdown] id="JeKhNHycFVV8" # ## Wine dataset # # + id="vsFriFRiGnhd" #import dataset and transform to csv from sklearn.datasets import load_wine wine = load_wine() df_wine = pd.DataFrame(data=wine['data'], columns=wine['feature_names']) df_wine['target'] = wine['target'] # + colab={"base_uri": "https://localhost:8080/", "height": 215} id="LucOkjhooNm6" outputId="88c2516e-0a93-490f-da92-5cfb8e596ce5" df_wine.sample(5) # + colab={"base_uri": "https://localhost:8080/", "height": 304} id="ERIeoNtboTfj" outputId="e5d0b10d-869c-4d11-ce82-29bfcbc839c7" df_wine.describe() # + id="6l0cL9O-oZOA" #Split data for traning and testing featuresNames = df_wine.columns[:-1] classNames = 'target' X_train, X_test, y_train, y_test = train_test_split(df_wine[featuresNames], df_wine[classNames], test_size=0.33, random_state=42) # + colab={"base_uri": "https://localhost:8080/"} id="Rbgs1VY55MuN" outputId="708cda65-b4f0-417d-fe8c-957f534940e5" wine.target_names # + colab={"base_uri": "https://localhost:8080/", "height": 441} id="zlejJL65pQFl" outputId="afae0061-610c-4dc1-aad5-31c2e86adeba" #Decision tree fit with the training data tree_clf = DecisionTreeClassifier(max_depth = 2, random_state=0) tree_clf = tree_clf.fit(X_train, y_train) #Create tree view export_graphviz( tree_clf, out_file = os.path.join(IMAGES_PATH, "wine_tree.dot"), feature_names = featuresNames, class_names = wine.target_names, rounded=True, filled=True ) Source.from_file(os.path.join(IMAGES_PATH, "wine_tree.dot")) # + colab={"base_uri": "https://localhost:8080/"} id="AvL1wpffqDnr" outputId="5bcafeac-84a8-4632-bcdb-7ed24592c5ac" #Calculate Accuracy: correct predictions / total number of data points score = tree_clf.score(X_test, y_test) print("Score of wine dataset Decision Tree model: ", score) # + colab={"base_uri": "https://localhost:8080/"} id="i5IRVgyiyw8T" outputId="a5687b2c-26d0-4dda-823c-b98ee0844a3c" print("Wine Figs generated in: ", IMAGES_PATH) # + [markdown] id="218AecS7qjlz" # ## Breast Cancer dataset # # + id="fODlZLNsqpAI" from sklearn.datasets import load_breast_cancer cancer = load_breast_cancer() df_cancer = pd.DataFrame(data=cancer['data'], columns=cancer['feature_names']) df_cancer['target'] = cancer['target'] # + colab={"base_uri": "https://localhost:8080/", "height": 249} id="lsZRUBNOrNjq" outputId="9a4068e5-31d8-4078-d89d-4eecdb8586d1" df_cancer.sample(5) # + colab={"base_uri": "https://localhost:8080/", "height": 338} id="byo-KolirTcj" outputId="6a297808-f094-411d-d774-3f648752b180" df_cancer.describe() # + id="dDj0j7cTrVtx" #Split data for traning and testing featuresNames = df_cancer.columns[:-1] classNames = 'target' X_train, X_test, y_train, y_test = train_test_split(df_cancer[featuresNames], df_cancer[classNames], test_size=0.33, random_state=42) # + colab={"base_uri": "https://localhost:8080/", "height": 441} id="6uTGuAYirezq" 
outputId="8d9e7142-e75f-4982-eb25-0c85e194200f" #Decision tree fit with the training data tree_clf = DecisionTreeClassifier(max_depth = 2, random_state=0) tree_clf = tree_clf.fit(X_train, y_train) #Create tree view export_graphviz( tree_clf, out_file = os.path.join(IMAGES_PATH, "cancer_tree.dot"), feature_names = featuresNames, class_names = cancer.target_names, rounded=True, filled=True ) Source.from_file(os.path.join(IMAGES_PATH, "cancer_tree.dot")) # + colab={"base_uri": "https://localhost:8080/"} id="ZkepYfLGryVy" outputId="a3b14ee9-43c8-4743-a857-f3979857894b" #Calculate Accuracy: correct predictions / total number of data points score = tree_clf.score(X_test, y_test) print("Score of cancer dataset with Decision tree model: ", score) # + colab={"base_uri": "https://localhost:8080/"} id="wcFhLkZYy4Vv" outputId="dd65f1f4-cc05-4b64-9d9f-803450b41b16" print("Cancer Figs generated in: ", IMAGES_PATH) # + colab={"base_uri": "https://localhost:8080/"} id="S_VqLEXPx0TY" outputId="01fe33ef-a28a-4ef6-936f-fa9f36325403" #Use logisticRegression model, j = batchGradientDescent(X_train, y_train, 0.5, 0.05, 1000, 0.0001) print('Logistic Regression Batch Gradient model B vector: ') print(model) print('Logistic Regression Batch Gradient model iterations: ', j) # + colab={"base_uri": "https://localhost:8080/"} id="jBdNXaz2x2Ga" outputId="8f4a7fa3-f876-415f-f16b-222e1b13f2ad" #Test model #predict values y_predicted_value = X_test.apply(lambda x: predict(model, x), axis=1) #Classify values y_predicted = y_predicted_value.apply(classify) #error print('error of cancer dataset with BatchGradient model: ', error(y_predicted, y_test, y_predicted.size)) print('score of cancer dataset with BatchGradient model: ', 1-error(y_predicted, y_test, y_predicted.size)) # + colab={"base_uri": "https://localhost:8080/"} id="0onDfrp2BZ_1" outputId="ebf6bf62-2515-4650-ef83-ec8242b600f7" #Logistic Regression with SciKit-Learn model = LogisticRegression(max_iter=10000) model.fit(X_train, y_train) print("coef of cancer dataset with SciKit-Learn model:", model.coef_) print("bias od cancer dataset with SciKit-Learn model:", model.intercept_) print("max_iters of model:", 10000) # + colab={"base_uri": "https://localhost:8080/"} id="myxiNfqoBeII" outputId="2f9e5271-d705-4920-8311-b6c5135f4c3b" #error with SciKit-Learn y_hat = model.predict(X_test) print('error of cancer dataset with SciKit-Learn model:', error(y_hat, y_test, y_hat.size)) print('Score of cancer dataset SciKit-Learn model:', 1-error(y_hat, y_test, y_hat.size)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # name: python3 # --- import twint import gc gc.collect() import twint import nest_asyncio nest_asyncio.apply() # Configure c = twint.Config() c.Username = "DiyanetTV" c.Limit = 300 c.Store_csv = True c.Output = "none.csv" twint.run.Search(c) import nest_asyncio nest_asyncio.apply() def get_followers_following(username): dic={} c = twint.Config() c.Username = username c.Hide_output = True c.Store_object = True sonuc= twint.run.Lookup(c) fol = twint.output.users_list[:] dic["followers"] = fol[0].followers dic["following"] = fol[0].following return dic tweets=[] c = twint.Config() c.Username = 'DiyanetTV' c.Since = "2019-3-3" c.Limit = 5000 c.Store_object = True c.Store_csv = True c.Output = "DiyanetTV.csv" twint.run.Search(c) tweets = twint.output.tweets_list print(len(tweets)) # + import io hashtags={} #print(type(tweets), 
len(tweets), type(hashtags)) # first iterate over the tweets for t in tweets: #print('t:', t, type(t), t.hashtags, type(t.hashtags)) # then iterate over the hashtags of that single tweet for h in t.hashtags: # increment the count if the hashtag already exists, otherwise initialize it to 1 #print('h:',h,type(h),t.username) if(t.username in hashtags): if(h in hashtags[t.username]): hashtags[t.username][h] += 1 else: hashtags[t.username][h]=1 else: hashtags[t.username]={h:1} # now save the data with io.open('hashtags.csv', 'w', encoding="utf-8") as output: output.write('username,hashtag,count\n') for user in hashtags: for h in hashtags[user]: output.write('{},{},{}\n'.format(user, h, hashtags[user][h])) # + from collections import Counter replies = twint.Config() replies.Since = "2018-03-01" replies.Pandas = True replies.To = "@DiyanetTV" twint.run.Search(replies) df = twint.storage.panda.Tweets_df #print(df) # - #print(df.head()) df.to_csv('Replies.csv', index=False) Replies = {x:y for x,y in zip(df['conversation_id'],df['nreplies'])} fetchedReplies =Counter(df['conversation_id']) #print(df[df['conversation_id']=='1243481878402723840']) for tweet in Replies: print(tweet, "\t{}\t{}\t".format(Replies[tweet],fetchedReplies[tweet])) # + mentions={} #print(tweets) for t in tweets: #print('t:', t, type(t), t.hashtags, type(t.hashtags)) # then iterate over the hashtags of that single tweet for m in t.mentions: #print(m, len(mentions)) # increment the count if the hashtag already exists, otherwise initialize it to 1 #print('h:',h,type(h),t.username) if(m['screen_name'] in mentions): mentions[m['screen_name']]+=1 else: mentions[m['screen_name']]=1 print(mentions) # - import pandas as pd mentions_df= pd.DataFrame(mentions.items(), columns=['screen_name', 'count']) mentions_df.to_csv('mentions.csv', index=False) import sys print(sys.executable) # + df_tweet=pd.read_csv('DiyanetTV.csv') print(df_tweet.columns) print(df_tweet.describe()) df_tweet['tweet_date']=pd.to_datetime(df_tweet['date'] + ' ' + df_tweet['time']) df_tweet = df_tweet.rename(columns={'tweet': 'Tweet', 'tweet_date': 'Timestamp', 'hashtags': 'Subject'}) print(df_tweet.head()) # - def twint2pd(columns): return twint.output.panda.Tweets_df[columns] tweet_df= df_tweet[['Timestamp','username','Tweet', 'Subject','mentions','replies_count','retweets_count','likes_count']] print(tweet_df.head()) tweet_list=tweet_df['Tweet'].tolist() from matplotlib import pyplot series = df_tweet[['date','likes_count']] print(series.head()) series.plot() pyplot.show() def count_plot_data(df, freq): plot_df = df.set_index('Timestamp').groupby('Subject').resample(freq).id.count().unstack(level=0, fill_value=0) plot_df.index.rename('Date', inplace=True) plot_df = plot_df.rename_axis(None, axis='columns') return plot_df # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_pytorch_p36) # language: python # name: conda_pytorch_p36 # --- # + from spotlight.cross_validation import random_train_test_split from spotlight.datasets.movielens import get_movielens_dataset from spotlight.evaluation import mrr_score from spotlight.factorization.implicit import ImplicitFactorizationModel from spotlight.interactions import Interactions import pandas as pd from spotlight.evaluation import * from torch.utils.tensorboard import SummaryWriter import torch from torch.utils.tensorboard import SummaryWriter import logging from 
spotlight.factorization.representations import * import os import pandas as pd import numpy as np from spotlight.cross_validation import random_train_test_split from spotlight.datasets.movielens import get_movielens_dataset from spotlight.evaluation import mrr_score from spotlight.factorization.implicit import ImplicitFactorizationModel from spotlight.interactions import Interactions import pandas as pd from spotlight.evaluation import * from torch.utils.tensorboard import SummaryWriter import torch from torch.utils.tensorboard import SummaryWriter import logging from spotlight.factorization.representations import * import os import collections from spotlight.sequence.implicit import ImplicitSequenceModel from spotlight.sequence.representations import CNNNet, PoolNet input_config = os.environ hyperparameters = { 'loss': "adaptive_hinge", 'batch': 128, 'lr': 1e-3, 'l2': 1e-06, 'n_iter': 50, 'emb_dim': 128, 'type': 'pool', "layers": [int(x) for x in "256-128-64-1".split("-")], "nonlin": 'tanh', "max_seq": 50, "min_seq": 2, "step": 2, "fmax_seq": 50 } h = hyperparameters if 'SUFFIX' in input_config: suffix = input_config['SUFFIX'] else: suffix = "1" if 'LOSS' in input_config: h['loss'] = input_config['LOSS'] if 'LR' in input_config: h['lr'] = float(input_config['LR']) if 'L2' in input_config: h['l2'] = float(input_config['L2']) if 'MOM' in input_config: h['mom'] = float(input_config['MOM']) else: h['mom'] = 0.9 if 'NEGSAMPLES' in input_config: h['neg'] = int(input_config['NEGSAMPLES']) else: h['neg'] = 5 if 'BATCH' in input_config: h['batch'] = int(input_config['BATCH']) h['amsgrad'] = False if 'AMSGRAD' in input_config: h['amsgrad'] = (input_config['AMSGRAD'] == 'True') h['adamw'] = False if 'ADAMW' in input_config and input_config['ADAMW']: h['adamw'] = (input_config['ADAMW'] == 'True') if 'EMBDIM' in input_config: h['emb_dim'] = int(input_config['EMBDIM']) if "LAYERS" in input_config: h["layers"] = [int(x) for x in input_config['LAYERS'].split("-")] if "NONLIN" in input_config: h["nonlin"] = input_config['NONLIN'] if "MAXSEQ" in input_config: h["max_seq"] = int(input_config["MAXSEQ"]) if "MINSEQ" in input_config: h["min_seq"] = int(input_config["MINSEQ"]) if "STEPSIZE" in input_config: h["step"] = int(input_config["STEPSIZE"]) if "FILTERSEQ" in input_config: h["fmax_seq"] = int(input_config["FILTERSEQ"]) betas = (h['mom'], 0.999) use_cuda = True tensorboard_base_dir = "sruns" model_store_dir = "smodels" n_iters = 50 # loss="adaptive_hinge" log_loss_interval = 1000 log_eval_interval = 20000 model_alias = ",".join([k + "=" + str(v) for k, v in collections.OrderedDict(h).items()]) model_alias = "pool_" + suffix + "_" + model_alias # train_data_path = "s3a://tubi-playground-production/smistry/emb3/train-aug-28-phase1" train_data_path = "/home/ec2-user/emb3/data/train-aug-28-phase" + suffix logging.basicConfig(filename="slogs/" + model_alias + '.log', filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.DEBUG) max_sequence_length = h["max_seq"] min_sequence_length = h["min_seq"] step_size = h["step"] original_train_data = pd.read_parquet(train_data_path) logger.info("Data is downloaded") # train_data = original_train_data uvs = original_train_data.groupby("uindex")["vindex"].agg(list) train_data = original_train_data[original_train_data.uindex.isin(uvs[uvs.apply(lambda x: len(x)) <= h["fmax_seq"]].index)] logger.info("Filtered train data..") train_data["vindex"] = train_data["vindex"] + 1 num_items = 
len(original_train_data["vindex"].unique()) + 2 interactions = Interactions(train_data["uindex"].to_numpy(), train_data["vindex"].to_numpy(), train_data["pct_cvt"].to_numpy(), train_data["latest_watch_time"].to_numpy(), num_users=len(original_train_data["uindex"].unique()), num_items=num_items) # if "1500K" in suffix: # logger.info("Increasing step size and max_sequence_length") # step_size = 2 # min_sequence_length = 2 # max_sequence_length = 50 train_seq = interactions.to_sequence(max_sequence_length=max_sequence_length, min_sequence_length=min_sequence_length, step_size=step_size) logger.info("Data is loaded and converted to sequences..") writer = SummaryWriter(log_dir='{}/{}'.format(tensorboard_base_dir, model_alias)) writer.add_text('alias', model_alias, 0) writer.add_text('hyperparameters', str(h), 0) def notify_loss_completion(epoch_id, batch_id, loss, net, model): # print("notify_loss_completion") writer.add_scalar("Batch/loss", loss, batch_id) logging.info('[Epoch {}] Batch {}, Loss {}'.format(epoch_id, batch_id, loss)) def notify_batch_eval_completion(epoch_id, batch_id, loss, net, model): # print("notify_batch_eval_completion") m = 1 def notify_epoch_completion(epoch_num, total_loss, net, model): # print("notify_epoch_completion") writer.add_scalar("Epoch/loss", total_loss, epoch_num) pairs_ndcg = nn_pairs_ndcg_score(net) writer.add_scalar("Epoch/pairs_ndcg", pairs_ndcg, epoch_num) # hit_ratio, ndcg = evaluate_hit_ratio_and_ndcg(model) # writer.add_scalar("Epoch/HR", hit_ratio, epoch_num) # writer.add_scalar("Epoch/NDCG", ndcg, epoch_num) hit_ratio, ndcg = -1, -1 logging.info('******** [Epoch {}] Embs NDCG {:.4f}, Hit Ratio: {:.4f}, NDCG: {:.4f}'.format(epoch_num, pairs_ndcg, hit_ratio, ndcg)) torch.save(net, model_store_dir + "/" + model_alias + "-" + str(epoch_num)) net.train() if "BASE_DIR" not in os.environ: os.environ["BASE_DIR"] = "/home/ec2-user/emb3" random_state = np.random.RandomState(100) net = PoolNet(num_items, embedding_dim=h['emb_dim'], layers=h["layers"], nonlinearity=h['nonlin']) model = ImplicitSequenceModel(loss=h['loss'], representation=net, batch_size=h['batch'], learning_rate=h['lr'], l2=h['l2'], n_iter=h['n_iter'], embedding_dim=h['emb_dim'], use_cuda=use_cuda, random_state=random_state, notify_loss_completion=notify_loss_completion, notify_batch_eval_completion=notify_batch_eval_completion, notify_epoch_completion=notify_epoch_completion, log_loss_interval=5000, log_eval_interval=20000, amsgrad=h['amsgrad'], adamw=h['adamw'], betas=betas, num_negative_samples=h['neg']) logger.info("Model is initialized, now fitting..") #model.fit(train_seq) # - sequences = train_seq.sequences.astype(np.int64) sequences_tensor = gpu(torch.from_numpy(sequences), True) from spotlight.torch_utils import * net = gpu(net, True) net self = model for minibatch_num, batch_sequence in enumerate(minibatch(sequences_tensor, batch_size=64)): net.train() net.train() sequence_var = batch_sequence user_representation, _ = net.user_representation( sequence_var ) print(user_representation.shape) print(sequence_var.shape) positive_prediction = net(user_representation, sequence_var) if self._loss == 'adaptive_hinge': negative_prediction = self._get_multiple_negative_predictions( sequence_var.size(), user_representation, n=self._num_negative_samples) else: negative_prediction = self._get_negative_prediction(sequence_var.size(), user_representation) print("Done") self._optimizer.zero_grad() loss = self._loss_func(positive_prediction, negative_prediction, mask=(sequence_var != PADDING_IDX)) 
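# The (adaptive) hinge loss above is computed per sequence position; the mask
# (sequence_var != PADDING_IDX) keeps padded positions out of the loss and the gradient.
# The running totals updated next (epoch_loss, interval_loss) are assumed to have been
# initialised to 0.0 before this minibatch loop.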
loss_val = loss.item() epoch_loss += loss_val interval_loss += loss_val loss.backward() model # + suffix = "1500K-with-3-vids" train_data_path = "/home/ec2-user/emb3/data/train-aug-28-phase" + suffix original_train_data = pd.read_parquet(train_data_path) validate_neg_flatten_vids = pd.read_parquet(os.environ['BASE_DIR'] + "/data/validate-neg-flatten-aug-28-phase" + suffix) validate_pos_flatten_vids = pd.read_parquet(os.environ['BASE_DIR'] + "/data/validate-pos-flatten-aug-28-phase" + suffix) validation_train_data = original_train_data[original_train_data["uindex"].isin(validate_pos_flatten_vids.uindex.tolist())] validation_train_data["vindex"] = validation_train_data["vindex"] + 1 interactions = Interactions(validation_train_data["uindex"].to_numpy(), validation_train_data["vindex"].to_numpy(), validation_train_data["pct_cvt"].to_numpy(), validation_train_data["latest_watch_time"].to_numpy(), num_users=len(original_train_data["uindex"].unique()), num_items=len(original_train_data["vindex"].unique()) + 2) max_sequence_len=100 sequences = interactions.to_sequence(max_sequence_length=max_sequence_len, step_size=max_sequence_len) # - from spotlight.torch_utils import * use_cuda= True sqs = sequences.sequences[:5] sequences_tensor = gpu(torch.from_numpy(sqs).long(),use_cuda) num_items=20000 net = PoolNet(num_items, embedding_dim=h['emb_dim'], layers=h["layers"], nonlinearity=h['nonlin']) net = gpu(net, True) net user_rp = net.user_representation(sequences_tensor)[1] vids=[1,3, 4,5,7] target_embedding = net.item_embeddings(gpu(torch.from_numpy(np.array(vids).reshape(-1, 1)).long(),use_cuda)).permute(0, 2, 1).squeeze() targets = gpu(torch.from_numpy(np.array(vids).reshape(-1, 1)), True) net(user_rp, targets) use_cuda= True sqs = sequences.sequences[:5] sequences_tensor = gpu(torch.from_numpy(sqs).long(),use_cuda) user_rp = mm.user_representation(sequences_tensor)[1] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 15장 웹 API # ## 15.1 웹 API의 이해 # ### 웹 API의 데이터 획득 과정 # ### 웹 API의 인증 방식 # ### 응답 데이터의 형식 및 처리 # # - json.dumps(데이터) 파이썬데이터를 제이슨데이터로 # - json.loads(데이터) 제이슨데이터를 파이썬데이터로 # - requests.get(url).json() 제이슨데이터를 파이썬 데이터로 # - xmltodict.parse(데이터) xml데이터를 파이썬데이터로 # ### 웹 API 정리된 사이트 # - https://www.programmableweb.com/category/all/apis # #### JSON 형식의 데이터 처리 # **[15장: 458 ~ 459페이지]** # + import json python_dict = { # 파이썬 딕셔너리 생성 "이름": "홍길동", "나이": 25, "거주지": "서울", "신체정보": { "키": 175.4, "몸무게": 71.2 }, "취미": [ "등산", "자전거타기", "독서" ] } type(python_dict) # - # **[15장: 459페이지]** # + json_data = json.dumps(python_dict) # 파이썬 파일을 제이슨 파일로 변경하는 함수 print(type(json_data)) # - # **[15장: 459페이지]** # - python 파일 -> json 파일 : json.dumps() # - 한글데이터가 깨져서 출력되기때문에 인코딩이 필수 print(json_data) # **[15장: 459페이지]** json_data = json.dumps(python_dict, indent=4, sort_keys=True, ensure_ascii=False) # ensure_ascii=False를 써야 한글인코딩이 됨 # sort_keys=True 가나다 순으로 정렬 print(type(json_data)) # 제이슨데이터 # indent=4 4칸 들여쓰기 print(json_data) # **[15장: 460페이지]** # - json 파일 -> python 파일 : json.loads() json_dict = json.loads(json_data) # 제이슨 데이터를 다시 파이썬데이터로 출력 print(type(json_dict)) # 타입? 
파이썬파일 display(json_dict) # 파이썬 딕셔너리 출력 # **[15장: 460페이지]** json_dict['신체정보']['몸무게'] # 신체정보의 몸무게를 출력 (딕셔너리 안의 딕셔너리를 키값으로 출력) # **[15장: 460페이지]** json_dict['취미'] # 키가 취미인 값을 출력 (리스트출력) json_dict['취미'][0:2] # 딕셔너리의 리스트를 슬라이싱으로 출력 # **[15장: 461페이지]** json_dict['취미'][0] # 취미 키, 0번째 리스트 원소 출력 # #### XML 형식의 데이터 처리 # **[15장: 465페이지]** # + #xml데이터 작성 xml_data = """ <사용자정보> <이름>홍길동 <나이>25 <거주지>서울 <신체정보> <키 unit="cm">175.4 <몸무게 unit="kg">71.2 <취미>등산 <취미>자전거타기 <취미>독서 """ print(type(xml_data)) # json이나 xml은 파이썬 파일로 변환되기 전까진 str타입으로 표기됨 print(xml_data) # xml데이터 출력 # - # **[15장: 465페이지]** # - json 파일 -> python 파일 (json 모듈 사용) # - xml 파일 -> python 파일 (xmltodict 모듈 사용) #

# - json.dumps(): json파일을 파이썬파일로 # - xmltodict.parse(): xml파일을 파이썬파일로 conda install xmltodict # xmltodict 모듈 설치하기 # - **xml to python** # - python to json # - json to python # + import xmltodict # XML 모듈 불러오기 dict_data = xmltodict.parse(xml_data, xml_attribs=True) # xmltodict.parse() # xml데이터를 파이썬데이터로 변경 # 파라미터로 xml_attribs=True를 넣어 유닛값을 살려줌 display(dict_data) #xml을 파이썬데이터로 변환시키면 orderedDictionary 형태로 변환됨 (키:값이 튜플 형태에 저장되어 있음) # 키와 벨류를 튜플로 바꿈 # 속성값을 @unit으로 표기 # 입력값을 #text로 표기 # - # ### xml데이터에서 파이썬로, 다시 파이썬데이터에서 제이슨데이터로 # - xml to python # - **python to json** # - json to python # - 유닛값이 들어간 딕셔너리 형태로 변환됨 data_ex = json.dumps(dict_data, indent=4, sort_keys=True, ensure_ascii=False) print(data_ex) # - xml to python # - python to json # - **json to python** # + data_ex1 = json.loads(data_ex) display(data_ex1) type(data_ex1) #딕셔너리 안의 딕셔너리 구조 # - # **[15장: 466페이지]** # - xml데이터에서 파이썬데이터로 변경한 후, 원소 출력하기 # - 파이썬데이터로 변경하면 이전에 json이였든 xml이였든 모두 동일한 형식(파이썬 딕셔너리)으로 출력함 dict_data['사용자정보']['이름'] # **[15장: 466페이지]** dict_data['사용자정보']['신체정보'] # 신체정보 변수에 들어있는 딕셔너리의 각 항목을 유닛과 텍스트로 구분하여 출력 data_ex1['사용자정보']['신체정보'] # xml -> 파이썬 -> json -> 파이썬 # **[15장: 466 ~ 467페이지]** dict_data['사용자정보']['신체정보']['키']['@unit'] # 유닛 출력, 딕셔너리로 들어있기에 키값으로 입력 dict_data['사용자정보']['신체정보']['키']['#text'] # 벨류 출력 print(data_ex1['사용자정보']['신체정보']['키']['#text'], dict_data['사용자정보']['신체정보']['키']['@unit'], sep='') # **[15장: 467페이지]** # - xml파일을 파이썬파일로 파싱한 다음, 각 요소를 변수로 저장해서 사용하기 # + import xmltodict dict_data = xmltodict.parse(xml_data) # xml에서 파이썬데이터로 변환, xml_attribs=False로 입력하지 않는 한 속성값 유지 user_name = dict_data['사용자정보']['이름'] # 사용자정보의 이름 벨류 선택 body_data = dict_data['사용자정보']['신체정보'] # 사용자정보의 신체정보 벨류 선택 height = body_data['키']['#text'] # 키의 벨류 선택 height_unit = body_data['키']['@unit'] # 키의 유닛 선택 weight = body_data['몸무게']['#text'] # 몸무게의 벨류 선택 weight_unit = body_data['몸무게']['@unit'] # 몸무게의 유닛 선택 print("[사용자 {0}의 신체정보]".format(user_name)) # 사용자 이름 벨류를 넣은 문장 출력 print("*키: {0}{1}".format(height, height_unit)) # 키 벨류와 키 유닛 출력 print("*몸무게: {0}{1}".format(weight, weight_unit)) # 몸무게 벨류와 몸무게 유닛 출력 # - # **[15장: 467페이지]** # + dict_data2 = xmltodict.parse(xml_data, xml_attribs=False) # 유닛값 제외하고 파이썬데이터로 변환 print(type(dict_data2)) dict_data2 # + dict_data3 = ([('이름', '홍길동'), ('나이', '25'), ('거주지', '서울')]) print(type(dict_data3)) dict_data3['이름'] # 리스트안에 직접 튜플을 넣을 경우, 딕셔너리처럼 검색되지 않음 # 하지만 xml을 파이썬객체로 변환시키면 OrderedDict타입으로 변환되고, 리스트 안의 튜플형태로 저장되고 딕셔너리의 기능을 수행함 # - # ### 웹 사이트 주소에 부가 정보 추가하기 # #### 웹 사이트 주소에 경로 추가하기 # **[15장: 468페이지]** # - url 합치기 base_url = "https://api.github.com/" sub_dir = "events" url = base_url + sub_dir print(url) # - requests.get()하여 해당 url 가져오기 # + import requests r = requests.get(base_url+sub_dir).url r # - dir(requests.get(base_url+sub_dir)) # requests.get() 하여 사용할 수 있는 메소드 리스트 # **[15장: 469페이지]** # + #방법1 import requests base_url = "https://api.github.com/" # 상위 주소 sub_dirs = ["events", "user", "emails" ] # 하위 주소 (리스트) for sub_dir in sub_dirs: # for반복문 url_dir = base_url + sub_dir # 상위주소와 하위주소를 더함 (3번 반복, 3개 주소 생성) r = requests.get(url_dir) # 주소의 데이터를 requests로 불러오기 print(r.url) # 스크래핑에서는 .text를 붙여서 소스코드를 불러왔지만, # 이번에는 .url를 붙여서 주소를 불러오기 # + ''' 같은 방법 웹스크레이핑에서 requests.get(주소).text로 바로 소스코드를 불러왔듯 requests.get(주소).url로 바로 url을 불러오기''' import requests base_url = "https://api.github.com/" # 상위 주소 sub_dirs = ["events", "user", "emails" ] # 하위 주소 (리스트) for sub_dir in sub_dirs: # for반복문 url_dir = base_url + sub_dir r = requests.get(url_dir).url print(r) # - # request.get()을 넣지 않은 반복문? 
(딕셔너리 형태의 파라미터를 넣지 않는다면 차이 없음) for sub_dir in sub_dirs: url_dir = base_url + sub_dir print(url_dir) # #### 웹 사이트 주소에 매개변수 추가하기 # **[15장: 470페이지]** # + import requests LAT = '37.57' # 위도 LON = '126.98' # 경도 API_KEY = '' # API 키(임의의 API 키, 권한을 받지 않은 API이기에 실행 안됨) UNIT = 'metric' # 단위 site_url = "http://api.openweathermap.org/data/2.5/weather" # 상위주소 parameter = "?lat=%s&lon=%s&appid=%s&units=%s"%(LAT, LON, API_KEY, UNIT) # 하위 주소 (문자열의 %s 부분에 변수를 대입) url_para = site_url + parameter # 전체 주소 r = requests.get(url_para) # 주소의 웹정보 가져오기 print(r.url) # 가져온 웹정보중 주소 불러오기 print(url_para) # 그냥 출력해도 동일 (딕셔너리 파라미터를 쓰지 않는 경우 동일하게 출력됨) # - # - url주소 디코딩 함수 : requests.utils.unquote() # + url_ex = 'https://www.google.com/search?biw=1280&bih=610&tbm=isch&sa=1&ei=Z_ZcXeDMLrnemAW68bDYBA&q=우주&oq=우주&gs_l=img.3..35i39j0l9.7639.7987..8131...1.0..1.99.386.4......0....1..gws-wiz-img.eOi8KabzOfY&ved=0ahUKEwignOP2u5PkAhU5L6YKHbo4DEsQ4dUDCAY&uact=5' decode_ex = requests.utils.unquote(url_ex) print(url_ex) print(decode11) # 이미 디코딩 되어있는 API키였음 # - # **[15장: 471페이지]** # + import requests LAT = '37.57' # 위도 LON = '126.98' # 경도 API_KEY = '' # API 키(실행되지 않는 임의의 API 키) UNIT = 'metric' # 단위 req_url = "http://api.openweathermap.org/data/2.5/weather" req_parameter = {"lat":LAT, "lon":LON , "appid": API_KEY, "units":UNIT} # 파라미터를 딕셔너리에 입력 r = requests.get(req_url,params=req_parameter) # requests.get()함수안에 params=을 써서 속성값 지정 # 속성값으로 입력된 req_parameter딕셔너리는 하위주소로 자동생성됨 # 해당 주소의 정보를 불러옴 print(r.url) # 해당 주소 정보의 url불러오기 print(r.text) print(r) # 여기서 쓰인 requests.get()함수는 딕셔너리를 자동으로 입력하여 주소를 만들어주기때문에 꼭 필요한 기능을 수행함 # - # #### 웹 사이트 주소의 인코딩과 디코딩 # - requests.utils.unquote()로 url 주소 디코딩하기 # **[15장: 471 ~ 472페이지]** # + import requests API_KEY = "" # 서버와 브라우저 간 통신을 하기위해 인코딩이 되어있는 API키 API_KEY_decode = requests.utils.unquote(API_KEY) # requests 모듈의 utils.unquote()함수로 API를 디코딩함 print("Encoded url(인코딩된 주소):", API_KEY) print("Decoded url(디코딩된 주소):", API_KEY_decode) # - # **[15장: 472페이지]** # + req_url = "http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList" # 상위주소 tm_x = 244148.546388 # 1번 파라미터 tm_x tm_y = 412423.75772 # 2번 파라미터 tm_y req_parameter = {"ServiceKey":API_KEY_decode, "tmX":tm_x, "tmY":tm_y} # API와 그외 파라미터를 담아 딕셔너리 생성 r = requests.get(req_url, params = req_parameter) # requests.get()으로 상위주소와 딕셔너리 파라미터를 담아서 전체주소를 만들고, # 해당 주소의 정보 가져오기 print(r.url) # 해당 주소의 url 가져오기 # + # 가져온 url 디코딩하기 url = 'http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList?ServiceKey=2Bx3evdvbaRBvhWEerg3efac2r3f3RfhDTERTw%2B9rkvoewRV%2Fovmrk3dq%3D%3D&tmX=244148.546388&tmY=412423.75772' decode1 = requests.utils.unquote(url) decode1 #디코딩을 하면 전체 주소는 (상위주소) ? (API_key) & (파라미터1) & (파라미터2).. 
로 구성됨 # - # **[15장: 473페이지]** # + req_url = "http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList" # 상위주소 req_parameter = {"ServiceKey":API_KEY, "tmX":tm_x, "tmY":tm_y} # 디코딩하지 않은 API와 다른 파라미터로 딕셔너리 생성 r = requests.get(req_url, params = req_parameter) # 상위주소와 하위주소(딕셔너리정보)를 넣어 requests.get으로 정보 받아오기 print(r.url) # 받아온 정보에서 url 불러오기 (동일) # - # #인코딩 url - http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList?ServiceKey=et5piq3pfpqLEWPpCbvtSQ%252Bertertg%252Bx3evdvbaRBvhWEerg3efac2r3f3RfhDTERTw%252B9rkvoewRV%252Fovmrk3dq%253D%253D&tmX=244148.546388&tmY=412423.75772 # #디코딩 url - http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList?ServiceKey=et5piq3pfpqLEWPpCbvtSQ%2Bertertg%2Bx3evdvbaRBvhWEerg3efac2r3f3RfhDTERTw%2B9rkvoewRV%2Fovmrk3dq%3D%3D&tmX=244148.546388&tmY=412423.75772 # ## 15.2 API 키를 사용하지 않고 데이터 가져오기 # ### 국제 우주 정거장의 정보 가져오기 (API가 필요없는 경우) # **[15장: 473 ~ 474페이지]** # + import requests # 주소의 정보를 가져올 때 사용 import json # 제이슨데이터를 파이썬데이터로 가져올 때 사용 url = "http://api.open-notify.org/iss-now.json" # 주소 r = requests.get(url) # 해당 주소의 정보를 가져오기 r.text # url의 텍스트를 출력하기 (제이슨파일) # - # **[15장: 474페이지]** # + json_to_dict = json.loads(r.text) # 가져온 제이슨데이터를 파이썬데이터로 변경 print(json_to_dict) print(type(json_to_dict)) # 파이썬 딕셔너리 형태로 변경됨 # - # **[15장: 474 ~ 475페이지]** # - requests 모듈의 .json()함수를 사용해서 바로 제이슨데이터를 파이썬데이터로 가져오기 # + import requests url = "http://api.open-notify.org/iss-now.json" r = requests.get(url) # 해당 url의 정보 가져오기 json_to_dict = r.json() # 해당 사이트의 제이슨 정보를 바로 파이썬데이터로 가져오기 .json()함수 사용 print(json_to_dict) type(json_to_dict) # 파이썬데이터로 저장 변수 타입확인 # - # **[15장: 475페이지]** # - requests.get(url).json()을 한번에 써서 해당 url의 제이슨데이터를 파이썬데이터로 한번에 가져오기 # + import requests # 사이트 정보 가져오는 모듈 호출 url = "http://api.open-notify.org/iss-now.json" # url 지정 json_to_dict = requests.get(url).json() # 해당 url의 제이슨데이터를 파이썬데이터로 받아옴(.json()함수 사용) print(json_to_dict) type(json_to_dict) # - # **[15장: 475페이지]** json_to_dict # 딕셔너리데이터 출력 # **[15장: 475페이지]** # + # 딕셔너리의 키로 값 호출 print(json_to_dict["iss_position"]) print(json_to_dict["iss_position"]["latitude"]) print(json_to_dict["iss_position"]["longitude"]) print(json_to_dict["message"]) print(json_to_dict["timestamp"]) # - # **[15장: 476페이지]** # + import requests # 사이트 정보 가져오는 모듈 호출 import time # 출력 간격 지정할 때 사용할 모듈 호출 url = "http://api.open-notify.org/iss-now.json" # 사이트 주소 지정 # 함수 생성 def ISS_Position(api_url): # url을 파라미터로 받음 json_to_dict = requests.get(api_url).json() # url에서 json정보를 파이썬데이터로 받아옴 return json_to_dict["iss_position"] # 받아온 딕셔너리의 iss_position키의 값만 리턴값으로 반환 for k in range(5): # for 반복문으로 5회 반복 print(ISS_Position(url)) # 함수 실행 (리턴값 출력) time.sleep(1) # 10초간 휴식 (정거장의 위치를 10초에 한번씩 출력하기) # - # #### 지도에서 위치 표시하기 (반복변수 실패) # !pip install folium # + import requests import time import folium url = "http://api.open-notify.org/iss-now.json" # 사이트 주소 지정 iss_lon = 0 iss_lat = 0 # 함수 생성 def ISS_Position(iss_position_api_url): json_to_dict = requests.get(iss_position_api_url).json() global iss_lon global iss_lat iss_lon = json_to_dict["iss_position"]['longitude'] iss_lat = json_to_dict["iss_position"]['latitude'] ISS_loc = folium.Map(location =[iss_lon, iss_lat]) ISS_loc #for k in range(5): # for 반복문으로 5회 반복 # ISS_Position(url) # 함수 실행 # ISS_loc = folium.Map(location =[iss_lon, iss_lat]) # time.sleep(5) # 10초간 휴식 (정거장의 위치를 10초에 한번씩 출력하기) # - # ### 국가 정보 가져오기 # **[15장: 476 ~ 477페이지]** # + import requests url_temp = "https://restcountries.eu/rest/v1/name/" # 각 나라별 정보를 보여주는 
사이트 (상위주소) country = "South Korea" # 파라미터1 (국가명) url = url_temp + country # 상위주소 + 파라미터 js_data = requests.get(url).text # 해당 주소의 url정보를 텍스트로 출력 display(js_data) # 텍스트로 출력(제이슨데이터) type(js_data) # - # **[15장: 477페이지]** json_to_list = requests.get(url).json() # 해당 주소의 제이슨데이터를 파이썬데이터로 저장 (리스트에 담긴 딕셔너리 형태) json_to_list json_to_list[0] # 가장 밖에 있는 리스트 벗기기 # **[15장: 478페이지]** json_to_list[0]["capital"] # 딕셔너리 전체가 리스트에 들어있음 # 0번째 리스트의 'capital'키를 갖는 벨류 출력 (수도명) # **[15장: 478페이지]** # + import requests # 사이트의 정보를 불러올 때 사용 import json # 사이트의 제이슨데이터를 파이썬데이터로 변환할 때 사용 countries =["South Korea", "United States of America", "United Kingdom", "France", "Germany"] # 국가명 리스트 생성 def country_to_capital(country): # 함수 생성 (파라미터는 국가명) url_temp = "https://restcountries.eu/rest/v1/name/" # 상위url(api) url = url_temp + country # 상위url+파라미터(국가명) json_to_list = requests.get(url).json() # 해당 주소의 제이슨데이터를 파이썬데이터로 저장 (리스트안의 딕셔너리) return json_to_list[0]["capital"] # 리턴값으로 수도명 출력 for country in countries: # for반복문 실행, 국가명 리스트의 원소를 하나씩 실행 capital = country_to_capital(country) # 함수에 반복변수(국가명)을 넣어 실행 (리턴값 수도명) print("*{0}: {1}".format(country, capital)) # 반복변수(국가명)와 for반복문의 리턴값(국가명)을 문자열에 출력 # - # ## 15.3 트위터에 메시지 작성하고 가져오기 # ### API 키 및 접속 토큰 생성 # # ### Tweepy 설치 및 인증 # https://developer.twitter.com/en/apps 트위터 개발자 사이트 # **[15장: 485페이지]** # !pip install tweepy # + import tweepy # 본인이 신청해서 생성한 문자열을 각각 복사해 넣습니다. consumer_key = '7JXVmTEI9Fpxyi9CneAnJYYvL' consumer_secret = '' access_token = '' access_secret = '' # - # **[15장: 485페이지]** auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # OAuth 인증 실행 auth.set_access_token(access_token, access_secret) # **[15장: 485페이지]** api = tweepy.API(auth) # API클래스를 이용해 api 객체 생성 api # **[15장: 486페이지]** print("name:",api.me().name) # api키를 넣어 트위터 계정의 이름을 출력 # ### 트윗 작성하기 # @sample57448511 # **[15장: 487페이지]** tweet_var = api.update_status('.update_status()함수를 사용하여 트윗 작성하기') # 생성된 api와 tweepy라이브러리의 .update_status()함수를 결합시켜 사용해 트윗을 작성함 tweet_var # 트위터 메시지 변수의 상태정보 # **[15장: 487페이지]** # 이미지 업로드하기 tweet_img_var = api.update_with_media("C:/myPyCode/download/python-logo.png", '.update_with_media()함수를 사용하여 이미지와 텍스트를 작성') tweet_img_var # 트위터 메시지 변수의 상태정보 # ### 타임라인에서 메시지 가져오기 # - 키워드를 지정해 데이터 가져오기 # 1. Tweepy의 Stream_Listener를 상속받아 클래스를 정의 # 2. 정의한 클래스를 이용해 객체를 생성 # 3. 생성한 객체를 Tweepy의 Stream을 이용해 트위터 Stream API와 연결 # 4. 
Stream의 Filter를 이용해 단어를 지정하고 Stream을 시작 (Tweepy의 Stream) # **[15장: 488페이지]** for status in tweepy.Cursor(api.home_timeline).items(2): print("*", status.text) # **[15장: 489페이지]** for status in tweepy.Cursor(api.home_timeline).items(2): print("*", status._json['text']) print(" ==> Created at", status._json['created_at']) # ### 키워드를 지정해 데이터 가져오기 # **[15장: 490페이지]** # + import tweepy class MyStreamListener(tweepy.StreamListener): # tweepy.StreamListener를 상속받는 클래스를 생성 def on_status(self, status): # 클래스 안에 기능을 수행하는 함수 존재 print(status.text) # 140자까지 출력 # - # **[15장: 490페이지]** myStreamListener = MyStreamListener() # 객체 생성 # **[15장: 490페이지]** myStream = tweepy.Stream(auth, myStreamListener) # # **[15장: 491페이지]** # + #myStream.filter(track = ['파이썬', 'python']) # 단어 목록을 필터에 걸음 # - # **[15장: 491페이지]** class MyStreamListener(tweepy.StreamListener): def __init__(self): super().__init__() self.tweet_num = 0 def on_status(self, status): self.tweet_num = self.tweet_num + 1 # 메세지가 올때마다 1 증가 if(self.tweet_num <= 5): print("***", status.text) # 140자까지 출력 return True else: return False # **[15장: 491페이지]** myStreamListener = MyStreamListener() myStream = tweepy.Stream(auth, myStreamListener) myStream.filter(track = ['trump']) # **[15장: 492 ~ 493페이지]** # + import tweepy # 키, 토근, 비밀번호 지정 consumer_key = 'YOUR-CONSUMER-KEY' consumer_secret = 'YOUR-CONSUMER-SECRET' access_token = 'YOUR-ACCESS-TOKEN' access_secret = 'YOUR-ACCESS-SECRET' # OAuth 인증 진행 auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_secret) # 인증된 auth 변수를 이용해 트위터 API 클래스의 정의 class MyStreamListener2(tweepy.StreamListener): def __init__(self, max_num): super().__init__() self.tweet_num = 0 self.max_num = max_num def on_status(self, status): self.tweet_num = self.tweet_num + 1 file_name = 'C:/myPyCode/data/twitter_stream_test.txt' if(self.tweet_num <= self.max_num ): with open(file_name, 'a', encoding="utf-8") as f: write_text = "*** " + status.text + "\n" f.write(write_text) return True else: return False def on_error(self, status): print(status) # 오류 메시지 출력 return False if __name__ == '__main__': myStreamListener = MyStreamListener2(5) myStream = tweepy.Stream(auth, myStreamListener) myStream.filter(track = ['머신 러닝', 'Machine Learning']) print("End of streaming!") # - # ## 15.4 정부의 공공 데이터 가져오기 # ### 회원 가입 및 서비스 신청 # ### 주소 및 우편번호 가져오기 # https://www.data.go.kr # **[15장: 502페이지]** # + import requests # 주소 정보 가져오는 모듈 호출 API_KEY = '' # 인증키 API_KEY_decode = requests.utils.unquote(API_KEY) # 디코딩한 인증키 print(API_KEY) print(API_KEY_decode) # 출력 # - # **[15장: 503페이지]** # + req_url = "http://openapi.epost.go.kr/postal/retrieveNewAdressAreaCdService/retrieveNewAdressAreaCdService/getNewAddressListAreaCd?" 
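# requests.get(req_url, params=...) URL-encodes and appends the query string itself, which is
# why the decoded key (API_KEY_decode) is passed below: handing it the already-encoded
# ServiceKey would get the key percent-encoded a second time.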
# 상위 주소 search_Se = "road" # 검색구분 (dong, road, post 중에 선택) srch_wrd = "동일로52길 3-4" # 검색어 req_parameter = {"ServiceKey":API_KEY_decode, "searchSe":search_Se, "srchwrd":srch_wrd} # 하위 주소 생성 r = requests.get(req_url, params = req_parameter) # 상위주소 + 하위주소의 url 정보 받아오기 xml_data = r.text # xml 텍스트데이터를 변수에 저장 print(xml_data) # string 데이터 # - # **[15장: 503페이지]** # + import xmltodict # xml데이터를 파이썬데이터로 변환하는 모듈 호출 dict_data = xmltodict.parse(xml_data) # xmltodict.parse()함수를 이용해 xml데이터를 파이썬테이터로 변환 dict_data # - # **[15장: 504페이지]** dict_data['NewAddressListResponse']['newAddressListAreaCd'] #불필요한 정보를 제외한 주소 딕셔너리 # + adress_list = dict_data['NewAddressListResponse']['newAddressListAreaCd'] # 딕셔너리 키를 넣어 주소리스트 지정 print("[입력한 도로명 주소]", srch_wrd) # 위에서 입력한 주소명 print("[응답 데이터에서 추출한 결과]") print("- 우편번호:", adress_list['zipNo']) # 주소리스트의 zipNo 값 출력 (우편번호) print("- 도로명 주소:", adress_list['lnmAdres']) # 주소리스트의 lnmAdres 값 출력 (도로명주소) print("- 지번 주소:", adress_list['rnAdres']) # 주소리스트의 rnAdres 값 출력 (지번주소) # - # ### 날씨 정보 가져오기 # #### 날씨 정보를 위한 서비스 신청 # #### 날씨 실황 조회 # **[15장: 509페이지]** # + import requests # 웹사이트 정보 가져오는 모듈 호출 API_KEY = 'UqlfWtXrhSxnPuX3G9jNHGX6K2jS1pmP%2Fz4AzQr9DRS6AWtBKNbYs8gOnz4o2H6gdBPSac0wR229Gy%2BJYHCh9Q%3D%3D' # 승인된 API키 API_KEY_decode = requests.utils.unquote(API_KEY) # 디코딩 하기 print('인코딩 API_key :', API_KEY) print('디코딩 API_key :', API_KEY_decode) # - # **[15장: 510 ~ 511페이지]** # + import json # 제이슨데이터를 파이썬 데이터로 변환하는 모듈 import datetime # 날짜정보를 가져오는 모듈 # [날짜 및 시간 설정] now = datetime.datetime.now() # 현재 날짜와 시간을 변수에 저장 (년월일시분초) date = "{:%Y%m%d}".format(now) # 현재의 년월일을 변수에 저장, {:년월일} (%Y 년, %m월, %d일) time = "{:%H00}".format(now) # 현재의 시간(정시)을 변수에 저장, {:시00} (%H 시간, 00 정시표시) if (now.minute >= 30): time = "{0}00".format(now.hour) # 현재 분이 30분 이상이면 이후 시간(정시) 설정 else: time = "{0}00".format(now.hour-1) # 현재 분이 30분 미만이면 현재 시간(정시) 설정 time = '1000' # [요청 주소 및 요청 변수 지정] req_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastGrib" # 요청 주소 baseDate = date # 예보 일자 지정 baseTime = time # 예보 시간 지정(정시로 지정) nx_val = 62 # 예보지점 X 좌표(서울시 광진구 군자동) ny_val = 126 # 예보지점 Y 좌표(서울시 광진구 군자동) num_of_rows = 6 # 한 페이지의 출력 개수 page_no = 1 # 페이지 번호 output_type = "json" # 응답 데이터의 형식 req_parameter = {"ServiceKey":API_KEY_decode, # 파라미터1 : API키 "nx":nx_val, "ny": ny_val, # 파라미터2, 3: x, y좌표 "base_date":baseDate, "base_time":baseTime, # 파라미터4, 5 : 예보일자, 예보시간 "pageNo":page_no, "numOfRows":num_of_rows, # 파라미터6, 7 : 페이지번호, 한페이지의 출력개수 "_type":output_type} # 파라미터8 : 응답 데이터 형식 # [데이터 요청] r = requests.get(req_url, params = req_parameter) # 해당주소의 데이터를 가져오기 dict_data = r.json() # json형태의 데이터를 파이썬데이터로 변환 dict_data # + #편집 import json # 제이슨데이터를 파이썬 데이터로 변환하는 모듈 import datetime # 날짜정보를 가져오는 모듈 # [날짜 및 시간 설정] now = datetime.datetime.now() # 현재 날짜와 시간을 변수에 저장 (년월일시분초) date = "{:%Y%m%d}".format(now) # 현재의 년월일을 변수에 저장, {:년월일} (%Y 년, %m월, %d일) time = "{:%H00}".format(now) # 현재의 시간(정시)을 변수에 저장, {:시00} (%H 시간, 00 정시표시) now_hour_edit = 0 if (now.hour < 10): global now_hour_edit now_hour_edit = '0' + now.hour else: now_hour_edit = now.hour print(now_hour_edit) if (now.minute >= 30): time = "{0}00".format(now_hour_edit) # 현재 분이 30분 이상이면 이후 시간(정시) 설정 else: time = "{0}00".format(now_hour_edit-1) # 현재 분이 30분 미만이면 현재 시간(정시) 설정 # [요청 주소 및 요청 변수 지정] req_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastGrib" # 요청 주소 baseDate = date # 예보 일자 지정 baseTime = time # 예보 시간 지정(정시로 지정) nx_val = 62 # 예보지점 X 좌표(서울시 광진구 군자동) ny_val = 126 # 예보지점 Y 좌표(서울시 광진구 군자동) num_of_rows = 6 # 한 페이지의 출력 개수 page_no = 1 # 페이지 번호 output_type = 
"json" # 응답 데이터의 형식 req_parameter = {"ServiceKey":API_KEY_decode, # 파라미터1 : API키 "nx":nx_val, "ny": ny_val, # 파라미터2, 3: x, y좌표 "base_date":baseDate, "base_time":baseTime, # 파라미터4, 5 : 예보일자, 예보시간 "pageNo":page_no, "numOfRows":num_of_rows, # 파라미터6, 7 : 페이지번호, 한페이지의 출력개수 "_type":output_type} # 파라미터8 : 응답 데이터 형식 # [데이터 요청] r = requests.get(req_url, params = req_parameter) # 해당주소의 데이터를 가져오기 dict_data = r.json() # json형태의 데이터를 파이썬데이터로 변환 dict_data # - # **[15장: 512 ~ 513페이지]** # + # [딕셔너리 데이터를 분석해서 원하는 값 추출] weather_items = dict_data['response']['body']['items']['item'] sky_cond = ["맑음", "구름 조금", "구름 많음", "흐림"] rain_type = ["없음", "비", "진눈개비", "눈"] print("[ 발표 날짜: {} ]".format(weather_items[0]['baseDate'])) print("[ 발표 시간: {} ]".format(weather_items[0]['baseTime'])) for k in range(len(weather_items)): weather_item = weather_items[k] obsrValue = weather_item['obsrValue'] if(weather_item['category'] == 'T1H'): print("* 기온: {} 도".format(obsrValue)) elif(weather_item['category'] == 'REH'): print("* 습도: {} 퍼센트".format(obsrValue)) elif(weather_item['category'] == 'SKY'): print("* 하늘: {}".format(sky_cond[obsrValue-1])) elif(weather_item['category'] == 'PTY'): print("* 강수: {}".format(rain_type[obsrValue])) # - # #### 일기 예보 조회 # **[15장: 513 ~ 515페이지]** # + import json # 제이슨데이터를 파이썬데이터로 변환하는 모듈 import datetime # 현재 날짜시간정보를 가져오는 모듈 # [날짜 및 시간 설정] now = datetime.datetime.now() # 현재 날짜와 시간을 변수에 저장(년월일시분초) date = "{:%Y%m%d}".format(now) # 현재시간의 년월일을 변수에 저장 {:년월일} time = "{:%H00}".format(now) # 현재시간의 시간(정시)을 변수에 저장 # 현재 분이 30분 이전이면 이전 시간(정시)을 설정 if (now.minute >= 30): time = "{0,0=2d}00".format(now.hour) else: time = "{0,0=2d}00".format(now.hour-1) # time = '1000' # [요청 주소 및 요청 변수 지정] req_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastTimeData" baseDate = date # 예보 일자 지정 baseTime = time # 예보 시간 지정(정시로 지정) nx_val = 61 # 예보지점 X 좌표(서울시 강남구 역삼동) ny_val = 125 # 예보지점 Y 좌표(서울시 강남구 역삼동) num_of_rows = 50 # 한 페이지에 포함된 결과 수 page_no = 1 # 페이지 번호 output_type = "json" # 응답 데이터 형식 지정 req_parameter = {"ServiceKey":API_KEY_decode, "nx":nx_val, "ny": ny_val, "base_date":baseDate, "base_time":baseTime, "pageNo":page_no, "numOfRows":num_of_rows, "_type":output_type} # [데이터 요청] r = requests.get(req_url, params = req_parameter) dict_data = r.json() # json데이터를 파이썬데이터로 변환 weather_items = dict_data['response']['body']['items']['item'] # 변환된 파이썬데이터에서 원하는 값만 추출 #print(len(weather_items)) #print(weather_items) sky_cond = ['없음', "맑음", "구름 조금", "구름 많음", "흐림"] # 날씨상태 리스트 rain_type = ["없음", "비", "진눈개비", "눈", '소나기'] # 강수형태 리스트 print("[ 발표 날짜: {} ]".format(weather_items[0]['baseDate'])) # 발표일 출력 print("[ 발표 시간: {} ]".format(weather_items[0]['baseTime'])) # 발표시간 출력 print("[ 초단기 일기 예보 ]") # 문자열 출력 # [for 반복문] for k in range(len(weather_items)): # weather_items의 길이만큼 실행 (30개) weather_item = weather_items[k] fcstTime = weather_item['fcstTime'] # weather_items 딕셔너리의 fcstTime키의 값을 변수에 저장 fcstValue = weather_item['fcstValue'] # weather_items 딕셔너리의 fcstValue키의 값을 변수에 저장 if(weather_item['category'] == 'T1H'): print("* 시간: {0}, 기온: {1} 도".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'REH'): print("* 시간: {0}, 습도: {1} 퍼센트".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'SKY'): print("* 시간: {0}, 하늘: {1}".format(fcstTime, sky_cond[fcstValue-1])) elif(weather_item['category'] == 'PTY'): print("* 시간: {0}, 강수: {1}".format(fcstTime, rain_type[fcstValue])) # - # # 실습 # + import json # 제이슨데이터를 파이썬데이터로 변환하는 모듈 import datetime # 현재 날짜시간정보를 가져오는 모듈 now = datetime.datetime.now() # 현재 날짜와 시간을 변수에 저장(년월일시분초) 
date = "{:%Y%m%d}".format(now) # 현재시간의 년월일을 변수에 저장 {:년월일} time = '1000' #현재 시간을 임의로 설정 # [요청 주소 및 요청 변수 지정] req_url = "http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastTimeData" baseDate = date # 발표 일자 지정 baseTime = time # 발표 시간 지정(정시로 지정) nx_val = 61 # 예보지점 X 좌표(서울시 강남구 역삼동) ny_val = 125 # 예보지점 Y 좌표(서울시 강남구 역삼동) num_of_rows = 30 # 한 페이지에 포함된 결과 수 page_no = 1 # 페이지 번호 output_type = "json" # 응답 데이터 형식 지정 req_parameter = {"ServiceKey":API_KEY_decode, "nx":nx_val, "ny": ny_val, "base_date":baseDate, "base_time":baseTime, "pageNo":page_no, "numOfRows":num_of_rows, "_type":output_type} r = requests.get(req_url, params = req_parameter) # 데이터 요청 dict_data = r.json() # json데이터를 파이썬데이터로 변환 weather_items = dict_data['response']['body']['items']['item'] # 변환된 파이썬데이터에서 원하는 값만 추출 sky_cond = ['없음', "맑음", "구름 조금", "구름 많음", "흐림"] # 날씨상태 리스트 rain_type = ["없음", "비", "진눈개비", "눈"] # 강수형태 리스트 thunder_type = ['확률없음', '낮음', '보통', '높음'] # 낙뢰 정보 리스트 print("[ 발표 날짜: {} ]".format(weather_items[0]['baseDate'])) # 발표일 출력 print("[ 발표 시간: {} ]".format(weather_items[0]['baseTime'])) # 발표시간 출력 print("[ 초단기 일기 예보 ]") # 문자열 출력 # [for 반복문] for k in range(len(weather_items)): # weather_items의 길이만큼 실행 (30개) weather_item = weather_items[k] fcstTime = weather_item['fcstTime'] # 예측 시간, weather_items 딕셔너리의 fcstTime키의 값을 변수에 저장 fcstValue = weather_item['fcstValue'] # 예측 값, weather_items 딕셔너리의 fcstValue키의 값을 변수에 저장 if(weather_item['category'] == 'T1H'): # 기온 print("* 시간: {0}, 기온: {1} ℃".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'RN1'): # 강수량 print("* 시간: {0}, 1시간 강수량: {1} mm".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'SKY'): print("* 시간: {0}, 하늘상태: {1}".format(fcstTime, sky_cond[fcstValue])) elif(weather_item['category'] == 'UUU'): # 바람1 print("* 시간: {0}, 동서바람성분: {1} m/s".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'VVV'): # 바람2 print("* 시간: {0}, 남북바람성분: {1} m/s".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'REH'): # 습도 print("* 시간: {0}, 습도: {1} %".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'PTY'): # 강수형태 print("* 시간: {0}, 강수형태: {1}".format(fcstTime, rain_type[fcstValue])) elif(weather_item['category'] == 'LGT'): # 낙뢰 print("* 시간: {0}, 낙뢰: {1} ".format(fcstTime, thunder_type[fcstValue])) elif(weather_item['category'] == 'VEC'): # 풍향 print("* 시간: {0}, 풍향: {1} 0".format(fcstTime, fcstValue)) elif(weather_item['category'] == 'WSD'): # 풍속 print("* 시간: {0}, 풍속: {1} 1".format(fcstTime, fcstValue)) # - {'baseDate': 20190808, 'baseTime': 1030, 'category': 'LGT', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'LGT', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'PTY', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'PTY', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'RN1', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'RN1', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 0, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'SKY', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 4, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'SKY', 'fcstDate': 
20190808, 'fcstTime': 1200, 'fcstValue': 4, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'T1H', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 31, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'T1H', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 31, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'REH', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 75, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'REH', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 75, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'UUU', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0.5, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'UUU', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 1.3, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'VVV', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0.7, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'VVV', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 0.8, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'VEC', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 216, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'VEC', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 238, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'WSD', 'fcstDate': 20190808, 'fcstTime': 1100, 'fcstValue': 0.9, 'nx': 61, 'ny': 125}, {'baseDate': 20190808, 'baseTime': 1030, 'category': 'WSD', 'fcstDate': 20190808, 'fcstTime': 1200, 'fcstValue': 1.5, 'nx': 61, 'ny': 125} 발표일자 발표시간 예보값(카테고리) 예측일자 예측시간 예보값(수치) x좌표 y좌표 # ### 대기 오염 정보 가져오기 # #### 대기 오염 정보를 위한 서비스 신청 # #### 근접 측정소 목록 조회하기 # **[15장: 519페이지]** # + import requests # 웹사이트 정보 가져오는 모듈 호출 API_KEY = '' # API인증키 입력 API_KEY_decode = requests.utils.unquote(API_KEY) # 디코딩실행 print('인코딩 API :', API_KEY) print('디코딩 API :', API_KEY_decode) # - # **[15장: 520페이지] 동이름을 입력해서 TM 기준좌표 구하기** # + req_url = "http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getTMStdrCrdnt" # 상위주소 입력 # 측정소정보 조회 서비스의 TM 기준좌표 조회 오퍼레이션 umd_name = "군자동" #읍, 면, 동 지정 # 동을 변수에 저장, 논현동 num_of_rows = 10 # 한 페이지에 포함된 결과 수 # 한페이지 출력개수를 변수에 저장 page_no = 1 # 페이지 번호 # 출력할 페이지수를 변수에 저장 output_type = "json" # 불러올 데이터형식을 변수에 저장 req_parameter = {"ServiceKey":API_KEY_decode, "umdName":umd_name, # 파라미터1, 2 : API키, 동이름 "pageNo":page_no, "numOfRows":num_of_rows, # 파라미터3, 4 : 출력할페이지수, 한페이지출력개수 "_returnType":output_type} # 파라미터5 : 불러올데이터형식 dict_data = requests.get(req_url, params = req_parameter).json() # 상위주소와 파라미터를 입력해서 생성한주소의 json정보를 # 파이썬데이터로 불러오기 print(dict_data['totalCount']) # totalCount항목의 값 print(dict_data) # + {'MsrstnInfoInqireSvrVo': {'_returnType': 'json', 'addr': '', 'districtNum': '', 'dmX': '', 'dmY': '', 'item': '', 'mangName': '', 'map': '', 'numOfRows': '10', 'oper': '', 'pageNo': '1', 'photo': '', 'resultCode': '', 'resultMsg': '', 'rnum': 0, 'serviceKey': '', 'sggName': '', 'sidoName': '', 'stationCode': '', 'stationName': '', 'tm': 0, 'tmX': '', 'tmY': '', 'totalCount': '', 'umdName': '논현동', 'ver': '', 'vrml': '', 'year': ''}, 'list': [{'_returnType': 'json', 'addr': '', 'districtNum': '', 'dmX': '', 'dmY': '', 'item': '', 'mangName': '', 'map': '', 'numOfRows': '10', 'oper': '', 'pageNo': '1', 'photo': '', 'resultCode': '', 'resultMsg': '', 'rnum': 0, 'serviceKey': '', 'sggName': '강남구', 'sidoName': '서울특별시', 
'stationCode': '', 'stationName': '', 'tm': 0, 'tmX': '202733.974301', 'tmY': '445717.50469', 'totalCount': '', 'umdName': '논현동', 'ver': '', 'vrml': '', 'year': ''}, {'_returnType': 'json', 'addr': '', 'districtNum': '', 'dmX': '', 'dmY': '', 'item': '', 'mangName': '', 'map': '', 'numOfRows': '10', 'oper': '', 'pageNo': '1', 'photo': '', 'resultCode': '', 'resultMsg': '', 'rnum': 0, 'serviceKey': '', 'sggName': '남동구', 'sidoName': '인천광역시', 'stationCode': '', 'stationName': '', 'tm': 0, 'tmX': '175850.136025', 'tmY': '434153.586394', 'totalCount': '', 'umdName': '논현동', 'ver': '', 'vrml': '', 'year': ''} ], 'parm': {'_returnType': 'json', 'addr': '', 'districtNum': '', 'dmX': '', 'dmY': '', 'item': '', 'mangName': '', 'map': '', 'numOfRows': '10', 'oper': '', 'pageNo': '1', 'photo': '', 'resultCode': '', 'resultMsg': '', 'rnum': 0, 'serviceKey': '', 'sggName': '', 'sidoName': '', 'stationCode': '', 'stationName': '', 'tm': 0, 'tmX': '', 'tmY': '', 'totalCount': '', 'umdName': '논현동', 'ver': '', 'vrml': '', 'year': ''}, 'totalCount': 2 } # 4개의 키가 들어있는 딕셔너리 # - # **[15장: 520 ~ 521페이지]** # + print("[입력한 읍/면/동명]", umd_name) # 문자열과 입력한 동이름을 출력 print("[TM 기준 좌표 조회 결과]") # 문자열 출력 for k in range(dict_data['totalCount']): # tatalCount키의 값 만큼(리스트의 개수만큼) 반복 sido = dict_data['list'][k]['sidoName'] # list키값의 k번째 리스트의 sidoName키값 출력 sgg = dict_data['list'][k]['sggName'] # list키값의 k번째 리스트의 sggName키값 출력 umd = dict_data['list'][k]['umdName'] # list키값의 k번째 리스트의 umdName키값 출력 tmX = dict_data['list'][k]['tmX'] # list키값의 k번째 리스트의 tmX키값 출력 tmY = dict_data['list'][k]['tmY'] # list키값의 k번째 리스트의 tmY키값 출력 print("- 위치: {0} {1} {2}".format(sido, sgg, umd)) # list키값의 k번째 리스트의 시도, 구, 동 변수를 문자열에 입력 print("- k = {0}, TM 좌표(X, Y): {1}, {2}\n".format(k, tmX, tmY)) # 변수 k, tmX, tmY를 문자열에 입력 # TM좌표? 
정부기관에서 사용하는 좌표명 # - # **[15장: 521페이지]** # + k = 1 # 원하는 위치 선택 (출력된 여러 동 중에 하나의 동 선택하기) TM_X = dict_data['list'][k]['tmX'] # TM X 좌표 # list키값의 k번째 리스트의 tmX키값 선택 TM_Y = dict_data['list'][k]['tmY'] # TM Y 좌표 # list키값의 k번째 리스트의 tmY키값 선택 print("TM 좌표(X, Y): {0}, {1}".format(TM_X, TM_Y)) # list키값의 k번째 리스트의 tmX, tmY키값을 문자열과 출력 # - # **[15장: 522 ~ 523페이지] 근접측정소 구하기** # + req_url = "http://openapi.airkorea.or.kr/openapi/services/rest/MsrstnInfoInqireSvc/getNearbyMsrstnList" # 측정소정보 조회 서비스의 근접측정소 목록 조회 오퍼레이션 x_value = TM_X # TM 측정방식 X좌표 y_value = TM_Y # TM 측정방식 Y좌표 num_of_rows = 10 # 한 페이지에 포함된 결과 수 page_no = 1 # 페이지 번호 output_type = "json" req_parameter = {"ServiceKey":API_KEY_decode, "tmX":x_value, "tmY":y_value, "pageNo":page_no, "numOfRows":num_of_rows, "_returnType":output_type} dict_data = requests.get(req_url, params = req_parameter).json() print("해당 지역 근처에 있는 측정소의 개수:", dict_data['totalCount']) # - # **[15장: 523페이지]** # + print("[측정소 정보]") for k in range(dict_data['totalCount']): stationName = dict_data['list'][k]['stationName'] ditance = dict_data['list'][k]['tm'] addr = dict_data['list'][k]['addr'] print("- 측정소 이름:{0}, 거리:{1}[km]".format(stationName, ditance)) print("- 측정소 주소:{0} \n".format(addr)) # - # ## 측정 정보 가져오기 # **[15장: 525페이지]** # + req_url = "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getMsrstnAcctoRltmMesureDnsty" # 대기오염 정보 조회서비스의 측정소별 실시간 측정정보 조회 오퍼레이션 station_name = "도산대로" # 측정소명을 변수로 지정 data_term = "DAILY" # 데이터기간을 'Daily'로 설정하고 변수로 지정 num_of_rows = 10 # 한페이지 출력할 결과수 지정 page_no = 1 # 출력할 페이지수 지정 version = 1.3 # 오퍼레이션 버젼 지정 output_type = "json" # 출력할 데이터타입을 json으로 지정 req_parameter = {"ServiceKey": API_KEY_decode, # 파라미터1 : API키 "stationName": station_name, # 파라미터2 : 측정소이름 "dataTerm":data_term, "ver": version, # 파라미터3,4 : 데이터기간, 오퍼레이션버젼 "pageNo": page_no, "numOfRows" : num_of_rows, # 파라미터5,6 : 출력할페이지수, 한페이지출력 결과수 "_returnType": output_type} # 파라미터7 : 출력할 데이터타입 dict_data = requests.get(req_url, params = req_parameter).json() # 해당주소의 정보 호출하기 dict_data['list'][0] # list키값의 첫번째 리스트값 출력 #dict_data # - # **[15장: 527페이지]** # + #Grade dataTime = dict_data['list'][0]['dataTime'] # 해당 딕셔너리의 dataTime키값을 변수에 지정 so2Grade = dict_data['list'][0]['so2Grade'] # 해당 딕셔너리의 so2Grade키값을 변수에 지정 coGrade = dict_data['list'][0]['coGrade'] # 해당 딕셔너리의 coGrade키값을 변수에 지정 o3Grade = dict_data['list'][0]['o3Grade'] # 해당 딕셔너리의 o3Grade키값을 변수에 지정 no2Grade = dict_data['list'][0]['no2Grade'] # 해당 딕셔너리의 no2Grade키값을 변수에 지정 pm10Grade1h = dict_data['list'][0]['pm10Grade1h'] # 해당 딕셔너리의 pm10Grade1h키값을 변수에 지정 pm25Grade1h = dict_data['list'][0]['pm25Grade1h'] # 해당 딕셔너리의 pm25Grade1h키값을 변수에 지정 khaiGrade = dict_data['list'][0]['khaiGrade'] # 해당 딕셔너리의 khaiGrade키값을 변수에 지정 #Value so2Value = dict_data['list'][0]['so2Value'] # 해당 딕셔너리의 so2Grade키값을 변수에 지정 coValue = dict_data['list'][0]['coValue'] # 해당 딕셔너리의 coGrade키값을 변수에 지정 o3Value = dict_data['list'][0]['o3Value'] # 해당 딕셔너리의 o3Grade키값을 변수에 지정 no2Value = dict_data['list'][0]['no2Value'] # 해당 딕셔너리의 no2Grade키값을 변수에 지정 pm10Value = dict_data['list'][0]['pm10Value'] # 해당 딕셔너리의 pm10Grade1h키값을 변수에 지정 pm25Value = dict_data['list'][0]['pm25Value'] # 해당 딕셔너리의 pm25Grade1h키값을 변수에 지정 khaiValue = dict_data['list'][0]['khaiValue'] # 해당 딕셔너리의 khaiGrade키값을 변수에 지정 #Unit so2Unit = 'ppm' coUnit = 'ppm' o3Unit = 'ppm' no2Unit = 'ppm' pm10Unit = '㎍/㎥' pm25Unit = '㎍/㎥' khaiUnit = '㎍/㎥' print("[측정소({0})에서 측정된 대기 오염 상태]".format(station_name)) # 문자열에 측정소명을 입력하여 출력 print("- 측정 시간:{0}".format(dataTime)) # 문자열에 측정시간을 입력하여 출력 #line1 print("- [지수] ", end='') 
print("아황산가스:등급({}),농도({}{})/일산화탄소:등급({}),농도({}{})/오존:등급({}),농도({}{})/이산화질소:등급({}),농도({}{})". format(so2Grade, so2Value, so2Unit, coGrade, coValue, coUnit, o3Grade, o3Value, o3Unit, no2Grade, no2Value, no2Unit))# 문자열에 각각의 변수를 입력하여 출력 #line2 print("- [등급] ", end='') print("미세 먼지:등급({}),농도({}{})/ 초미세먼지:등급({}),농도({}{})/통합대기환경:등급({}),농도({}{})". format(pm10Grade1h, pm10Value, pm10Unit, pm25Grade1h, pm25Value, pm25Unit, khaiGrade, khaiValue, khaiUnit)) # 문자열에 각각의 변수를 입력하여 출력 # - # **[15장: 527 ~ 528페이지]** # + gradeNum2Str = {"1":"좋음", "2":"보통", "3":"나쁨", "4":"매우나쁨" } # 등급을 문자로 변환할 딕셔너리 생성 print("[측정소({0})에서 측정된 대기 오염 상태]".format(station_name)) # 측정소를 입력한 문자열 출력 print("- 측정 시간:{0}".format(dataTime)) # 측정시간을 입력한 문자열 출력 print("- 아황산가스:{},({}{}), 일산화탄소:{},({}{}), 오존:{},({}{}), 이산화질소:{},({}{})". format(gradeNum2Str[so2Grade],so2Value, so2Unit, gradeNum2Str[coGrade], coValue, coUnit, gradeNum2Str[o3Grade], o3Value, o3Unit, gradeNum2Str[no2Grade], no2Value, no2Unit)) print("- 미세 먼지:{},({}{}), 초미세 먼지:{},({}{}), 통합대기환경:{},({}{})". format(gradeNum2Str[pm10Grade1h], pm10Value, pm10Unit, gradeNum2Str[pm25Grade1h], pm25Value, pm25Unit, gradeNum2Str[khaiGrade], khaiValue, khaiUnit)) # - # ## 15.5 정리 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Determine Diversion Flows for a hypothetical Scenario # + [markdown] tags=[] # Created on 1/12/2021 # Designed for => Python 3.7.6, adapted for Jupyter notebook # @author: # Email: # Last: modified ____________ # # #### Script looks at flow from station 401211A for the period of data 1968 to 2021 and determines: # # 1) How many years during the period of record, can we divert the 100GL # target while complying with hypothetical flow thresholds and diversion # rules for this location # # 2) How many days each year is there a flow excedent where water can be # diverted # - # ### Import Packages import pandas as pd import os import matplotlib.pyplot as plt import numpy as np # %matplotlib inline # ### Specify the path to the data file and create an output folder to save the figures # + folder_path = os.getcwd() flow_file = "401211a_historical.csv" # Change to the directory where the .dat file is located os.chdir(folder_path) fig_dir = 'figures' # Create a folder to store the figures if not os.path.exists("figures"): print("create figures folder") os.mkdir(fig_dir) # - # ### Read in the .csv file and create a dataframe df = pd.read_csv(flow_file, index_col=0, header = 2, parse_dates=True) df = df.iloc[2:] df.dropna(how="all", inplace=True) df["Discharge (ML/Day)"] = df["Discharge (ML/Day)"].astype(float) # ### Specify the flow threshold and diversion rules to determine when and how much water can be diverted # # We have two conditions we need to follow: # # We can't divert water if the flow is under 500 MG (ie. flow threshold) # If flow is above 500 MG we can only divert half of the excedent (ie. 
[flow - flow threshold] * 0.5)

# +
# Flow threshold (ML) under which NO water can be diverted
flow_threshold = 500

# Fraction of the flow above the threshold that is allowed to be diverted
diversion_rule = 0.5

conditions = (df["Discharge (ML/Day)"] <= flow_threshold,
              df["Discharge (ML/Day)"] > flow_threshold)

div_values = (0, (df["Discharge (ML/Day)"] - flow_threshold) * diversion_rule)

# Create a new column with the calculated diversion values using the conditions above
df['Diversion (ML/Day)'] = np.select(conditions, div_values)
# -

# ### Resample the data to obtain yearly values of the total flows that can be diverted

# + tags=[]
df_yearly = df.resample('Y').sum()

# Rename column units to ML/year
df_yearly.rename(columns={'Discharge (ML/Day)': "Discharge (ML/Year)",
                          "Diversion (ML/Day)": 'Diversion (ML/Year)'}, inplace=True)
# -

# Now we can determine for how many years of this historical dataset we could have complied with the diversion target

# +
diversion_target = 100  # (GL)

conditions = ((df_yearly["Diversion (ML/Year)"]/1000) < diversion_target,
              (df_yearly["Diversion (ML/Year)"]/1000) >= diversion_target)
div_values = ("no", "yes")

# Create a column which tells us if the diversion target was met or not
df_yearly['target_met'] = np.select(conditions, div_values)

# Calculate in how many years we meet the diversion target during this period of record
diversion_years = df_yearly['target_met'].value_counts()['yes']

# BONUS QUESTION: How many days during each year can we divert water (flows are above threshold)
no_divdays_year = (df["Diversion (ML/Day)"] > 0).resample('Y').sum().astype(int)
# -

# ### The number of years from 1968 to 2021 that the diversion target is met is

print(diversion_years)

# We can see how many days each year had flows above the diversion threshold

print(no_divdays_year)

# ### Create a couple of figures to summarize the data

# + tags=[]
# Set up plotting specs
plt.rc("axes", linewidth=0.5)
plt.rcParams.update({'font.size': 12})
plt.rcParams.update({'legend.labelspacing': 0.25})
plt.matplotlib.rc('font', **{'sans-serif': 'Arial', 'family': 'sans-serif'})
plt.rcParams['lines.linewidth'] = 1.0

# +
# ----------Fig 1: Number of days per year where flow is above threshold
fig = plt.figure(figsize=(8, 9))
ax = fig.add_subplot(111)
plot1 = no_divdays_year.plot(ax=ax)
ax.set_xlabel('Date')
ax.set_ylabel('No. days above flow threshold per year')
ax.set_title("Station 401211A")
plt.savefig(os.path.join(fig_dir, "Total_days_above_threshold(Y)" + '.png'),
            bbox_inches="tight", dpi=300)

# +
# ----------Fig 2: Years where the total amount of allowed diverted flows exceeds
# ----------      the diversion target of 100 GL
fig = plt.figure(figsize=(8, 9))
ax = fig.add_subplot(111)
plot1 = df_yearly["Diversion (ML/Year)"].plot(ax=ax)
plt.axhline(y=(diversion_target*1000), color='r', linestyle='-')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.95)
plt.text(0.405, 0.13, "Diversion Target = 100 GL", transform=ax.transAxes, fontsize=10, bbox=props)
ax.set_xlabel('Date')
ax.set_ylabel('Total Yearly Allowed Diversion (ML)')
ax.set_title("Station 401211A")
plt.savefig(os.path.join(fig_dir, "Yearly_allowed_diversion(ML)" + '.png'),
            bbox_inches="tight", dpi=300)
# -

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Boolean functions and expressions

# The `sympy` package has a [logic](http://docs.sympy.org/latest/modules/logic.html) module.
# With it we can do some simplifications.

from sympy import *

# We define the symbols we are going to use. Supremum and infimum (join and meet) are entered with the logical or and and operators, although we can also use `And` and `Or` as functions. For negation we use `Not` or `~`. We also have `Xor`, `Nand`, `Implies` (which can be used in prefix form with `>>`) and `Equivalent`.

x, y, z = symbols("x,y,z")
p = (x | y) & ~z
pprint(p)

# The conjunctive and disjunctive normal forms can be computed as follows

to_cnf(p)

to_dnf(p)

# We can also simplify expressions

simplify(x | ~x)

# Or assign truth values to the variables

p.xreplace({x: True})

# This lets us build our own truth tables

p.free_symbols

p = Or(x, And(x, y))

# +
from itertools import product as cartes  # Cartesian product over the truth values
from IPython.display import HTML, display

colores = ['LightCoral', 'Aquamarine']
# Header row: the two variables and the expression
tabla = "<table><tr><th>$" + latex(x) + "$</th><th>$" + latex(y) + "$</th><th>$" + latex(p) + "$</th></tr>"
for t in cartes({True, False}, repeat=2):
    v = dict(zip((x, y), t))
    # One row per truth assignment, coloured by the value the expression takes
    tabla = tabla + "<tr bgcolor='" + colores[int(bool(p.xreplace(v)))] + "'>"
    tabla = tabla + "<td>" + str(v[x]) + "</td>"
    tabla = tabla + "<td>" + str(v[y]) + "</td>"
    tabla = tabla + "<td>" + str(p.xreplace(v)) + "</td></tr>"
tabla = tabla + "</table>"
display(HTML(tabla))
# -

# One way to check that two expressions are equivalent is the following

Equivalent(simplify(p), simplify(x))

# Let us now see how to find the simplified version of a Boolean function given by its minterms. Apparently `SOPform` does some simplification using the Quine-McCluskey algorithm

p = SOPform([x, y, z], [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 0, 0], [1, 0, 1]])
p

# Using `sympy` we can print a friendlier form of a Boolean expression

pprint(p)

# The commands `simplify` or `simplify_logic` can simplify it further

pprint(simplify(p))

pprint(simplify_logic(p))

# In fact, `p` can be written in an even more compact form. For that we will use the espresso algorithm, which is implemented in the `pyeda` package

from pyeda.inter import *

# This package does not accept the variables defined with `symbols`, so we declare them with `exprvar` to define Boolean variables

x, y, z = map(exprvar, "xyz")

p = SOPform([x, y, z], [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 0, 0], [1, 0, 1]])

# Another problem is that the output of `SOPform` is not a `pyeda` expression. We can fix that by converting it to a string and re-reading it in `pyeda`

p = expr(str(p))

# Now we can use the *espresso* simplifier implemented in `pyeda`

pm, = espresso_exprs(p)
pm

# And we can check that it is shorter than the output `sympy` gave. To write it in a more "readable" way we use `pprint` from `sympy` again, but for that we need to convert our `pyeda` expression back to `sympy`

pprint(sympify(pm))

# We could have defined `p` directly using truth tables

p = truthtable([x, y, z], "01111110")
pm, = espresso_tts(p)
pprint(sympify(pm))

# The truth table of an expression is obtained as follows

expr2truthtable(pm)

# Let us look at an analogous example with more variables, and along the way show how to define vectors of variables

X = ttvars('x', 4)
f = truthtable(X, "0111111111111110")
fm, = espresso_tts(f)
fm

expr2truthtable(fm)

# ### A simplification example

# Let us see that the exclusive or, with the definition $x\oplus y=(x\wedge \neg y)\vee (\neg x\wedge y)$, is associative

x, y, z = map(exprvar, "xyz")
f = lambda x, y: Or(And(x, ~y), And(~x, y))

f(x, y)

expr2truthtable(f(x, y))

f(x, y).equivalent(Xor(x, y))

# Let us check that indeed $x\oplus(y\oplus z)=(x\oplus y)\oplus z$

pprint(simplify_logic(f(x, f(y, z))))

pprint(simplify_logic(f(f(x, y), z)))

a = f(f(x, y), z)
b = f(x, f(y, z))
a.equivalent(b)

# We can write a function that converts a minterm into a pyeda expression

def minterm2expr(l, v):
    n = len(l)
    vv = v.copy()
    for i in range(n):
        if not(l[i]):
            vv[i] = Not(vv[i])
    return And(*vv)

x, y, z, t = map(exprvar, "xyzt")
minterm2expr([0, 1, 0, 1], [x, y, z, t])

def minterms2expr(l, v):
    return Or(*[minterm2expr(a, v) for a in l])

hh2 = minterms2expr([[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 1, 1], [1, 0, 0, 0], [1, 0, 1, 0], [1, 1, 0, 0]], [x, y, z, t])
hh2

pprint(sympify(hh2))

# And now we can simplify it

sh2, = espresso_exprs(hh2)
sh2

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Step 3: Pattern Confirmation
#
# 1. [Analysis 1: Specificity and Concreteness](#a1)
# 2. [Measuring Specificity using WordNet](#spec)
# 2. [Measuring Concreteness using a Crowdsourced Weighted Dictionary](#concrete)

# 3. [Analysis 2: Counting People and Organizations Mentioned using Named Entity Recognition](#ner) #
# # Analysis 1: Specificty and Concreteness import pandas import nltk from nltk import word_tokenize from nltk.corpus import wordnet as wn from nltk.corpus import stopwords import numpy as np import scipy import matplotlib.pyplot as plt import json #Define function to count the number of hypernyms for each noun and verb def specificty(x): x = x.replace('[\x00-\x1f]'," ") text = word_tokenize(x) total_list = [] for w in text: if not wn.synsets(w): pass else: synset = wn.synsets(w) #limit to nouns and verbs, as other words are not arranged hierarchically if ((synset[0].pos() == (wn.NOUN)) or (synset[0].pos() == (wn.VERB))): #I assume the most popular definition of each word. paths = synset[0].hypernym_paths() a_path = [] for num in range(0,len(paths)): a_path.append(len([synset.name for synset in paths[num]])) #I am taking the path with the minimum number of hypernyms, but this could be calculated some other way. path_num = min(a_path) total_list.append( (w, path_num) ) return total_list #Function to calculate the concreteness of a word based on the crowdsourced dictionary def concrete(x, dict): x = x.replace('[\x00-\x1f]'," ") x = x.lower() text = word_tokenize(x) text = [word for word in text if word not in stopwords.words('english')] concrete_score = [] for w in text: if w in dict: concrete_score.append((w, dict[w])) return concrete_score # ### Create Strings # + #first create strings for the two comparison texts, #Kant's The Metaphysical Elements of Ethics #and the Wikipedia page on Germany kant_string = open("../input_data/kant_metaphysics.txt", 'r', encoding='utf-8').read() wiki_string = open("../input_data/wiki_germany.txt", 'r', encoding='utf-8').read() # - #Read in our dataframe to extract text df = pandas.read_csv("../data/comparativewomensmovement_dataset.csv", sep='\t', index_col=0, encoding='utf-8') df # + #concatenate the documents from each organization together, creaing four strings redstockings = df[df['org']=='redstockings'] redstockings_string = ' '.join(str(s) for s in redstockings['text_string'].tolist()) cwlu = df[df['org']=='cwlu'] cwlu_string = ' '.join(str(s) for s in cwlu['text_string'].tolist()) heterodoxy = df[df['org']=='heterodoxy'] heterodoxy_string = ' '.join(str(s) for s in heterodoxy['text_string'].tolist()) hullhouse = df[df['org']=='hullhouse'] hullhouse_string = ' '.join(str(s) for s in hullhouse['text_string'].tolist()) # - # # ## Specificity Score # + #Calculate specificity score for each noun and verb in each string #Creates a list with word and specificity score for each string kant_specificity = specificty(kant_string) wiki_specificity = specificty(wiki_string) redstockings_specificity = specificty(redstockings_string) cwlu_specificity = specificty(cwlu_string) heterodoxy_specificity = specificty(heterodoxy_string) hullhouse_specificity = specificty(hullhouse_string) # - #extract just the specificity score from each list kant_specificity_array = list(int(x[1]) for x in kant_specificity) wiki_specificity_array = list(int(x[1]) for x in wiki_specificity) cwlu_specificity_array = list(int(x[1]) for x in cwlu_specificity) heterodoxy_specificity_array = list(int(x[1]) for x in heterodoxy_specificity) hh_specificity_array = list(int(x[1]) for x in hullhouse_specificity) red_specificity_array = list(int(x[1]) for x in redstockings_specificity) # + #check for a normal distribution fig = plt.figure() ax1 = fig.add_subplot(221) #top left ax2 = fig.add_subplot(222) #top right ax3 = fig.add_subplot(223) #bottom left ax4 = fig.add_subplot(224) #bottom right 
ax1.hist(heterodoxy_specificity_array, bins=10) ax1.set_title("Heterodoxy") ax2.hist(hh_specificity_array, bins = 10) ax2.set_title("Hull House") ax3.hist(red_specificity_array, bins = 10) ax3.set_title("Redstockings") ax4.hist(cwlu_specificity_array, bins = 10) ax4.set_title("CWLU") plt.tight_layout() plt.show() # - # ### Compare the Distributions # + #print descriptive stats print("Mean Specificity Score for Kant") print(np.mean(kant_specificity_array)) print("Mean Specificity Score for Wikipedia entry on Germany") print(np.mean(wiki_specificity_array)) print("Mean Specificity Score for Heterodoxy") print(np.mean(heterodoxy_specificity_array)) print("Mean Specificity Score for Hull House") print(np.mean(hh_specificity_array)) print("Mean Specificity Score for Redstockings") print(np.mean(red_specificity_array)) print("Mean Specificity Score for CWLU") print(np.mean(cwlu_specificity_array)) # + #create an array for each city, and an array for each wave, for comparrison newyork_specificity_array = red_specificity_array + heterodoxy_specificity_array chicago_specificity_array = cwlu_specificity_array + hh_specificity_array firstwave_specificity_array = hh_specificity_array + heterodoxy_specificity_array secondwave_specificity_array = cwlu_specificity_array + red_specificity_array # - #compare percent difference on the specificity scale (1:18) for the test arrays (np.mean(wiki_specificity_array) - np.mean(kant_specificity_array)) / (max(wiki_specificity_array) - min(kant_specificity_array)) #compare percent difference on the specificity scale (1:18) for the city arrays (np.mean(chicago_specificity_array) - np.mean(newyork_specificity_array)) / (max(chicago_specificity_array) - min(newyork_specificity_array)) #compare percent difference on the specificity scale (1:18) for the wave arrays #note this difference is much smaller than the city-based difference (np.mean(firstwave_specificity_array) - np.mean(secondwave_specificity_array)) / (max(firstwave_specificity_array) - min(secondwave_specificity_array)) #calculate ttest statistics on city and wave arrays #note the statistic is much smaller on the wave-based arrays compared to the city-based arrays print(scipy.stats.ttest_ind(chicago_specificity_array, newyork_specificity_array)) print(scipy.stats.ttest_ind(firstwave_specificity_array, secondwave_specificity_array)) # # ## Concreteness Score #Read in the dictionary created by Brysbaert et al. 
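# (Descriptive note, not in the original notebook:) the Brysbaert et al. norms give each word a mean
# concreteness rating ('Conc.M') on the 1-5 scale used as the concreteness range later in this notebook;
# entries with 'Bigram' == 1 are two-word expressions and are filtered out below so only single words are scored.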
dict_df = pandas.read_excel("../input_data/Concreteness_ratings_Brysbaert_et_al_BRM.xlsx",sheetname="Sheet1") dict_df = dict_df[dict_df['Bigram']==0] word_dict = dict_df.set_index("Word")['Conc.M'].to_dict() # + #Calculate concreteness score for each noun and verb in each string #Creates a list of tuples, with word and concreteness score for each string kant_concrete = concrete(kant_string, word_dict) wiki_concrete = concrete(wiki_string, word_dict) redstockings_concrete = concrete(redstockings_string, word_dict) cwlu_concrete = concrete(cwlu_string, word_dict) heterodoxy_concrete = concrete(heterodoxy_string, word_dict) hullhouse_concrete = concrete(hullhouse_string, word_dict) # - #extract just the concreteness score from each list kant_concrete_array = list(int(x[1]) for x in kant_concrete) wiki_concrete_array = list(int(x[1]) for x in wiki_concrete) cwlu_concrete_array = list(int(x[1]) for x in cwlu_concrete) heterodoxy_concrete_array = list(int(x[1]) for x in heterodoxy_concrete) hh_concrete_array = list(int(x[1]) for x in hullhouse_concrete) red_concrete_array = list(int(x[1]) for x in redstockings_concrete) # ### Compare the Distributions # + #check for a normal distribution fig2 = plt.figure() ax1 = fig2.add_subplot(221) #top left ax2 = fig2.add_subplot(222) #top right ax3 = fig2.add_subplot(223) #bottom left ax4 = fig2.add_subplot(224) #bottom right ax1.hist(heterodoxy_concrete_array, bins=5) ax1.set_title("Heterodoxy") ax2.hist(hh_concrete_array, bins = 5) ax2.set_title("Hull House") ax3.hist(red_concrete_array, bins = 5) ax3.set_title("Redstockings") ax4.hist(cwlu_concrete_array, bins = 5) ax4.set_title("CWLU") plt.tight_layout() plt.show() # + #print descriptive stats print("Mean Concreteness Score for Kant") print(np.mean(kant_concrete_array)) print("Mean Concreteness Score for Wikipedia entry on Germany") print(np.mean(wiki_concrete_array)) print("Mean Concreteness Score for Heterodoxy") print(np.mean(heterodoxy_concrete_array)) print("Mean Concreteness Score for Hull House") print(np.mean(hh_concrete_array)) print("Mean Concreteness Score for Redstockings") print(np.mean(red_concrete_array)) print("Mean Concreteness Score for CWLU") print(np.mean(cwlu_concrete_array)) # + #create one array for each city newyork_concrete_array = heterodoxy_concrete_array + red_concrete_array chicago_concrete_array = hh_concrete_array + cwlu_concrete_array #create one array for each wave firstwave_concrete_array = heterodoxy_concrete_array + hh_concrete_array secondwave_concrete_array = red_concrete_array + cwlu_concrete_array # - #compare percent difference on the concreteness scale (1:5) for the test arrays (np.mean(wiki_concrete_array) - np.mean(kant_concrete_array)) / (5-1) #compare percent difference on the concreteness scale (1:5) for the city-based arrays (np.mean(chicago_concrete_array) - np.mean(newyork_concrete_array)) / (5-1) #compare percent difference on the concreteness scale (1:5) for the wave-based arrays #notice this percent difference is around half as much as the city-based differernce (np.mean(firstwave_concrete_array) - np.mean(secondwave_concrete_array)) / (5-1) #calculate ttest statistics on the city- and wave-based arrays #note the statistic is more than twice as large for the New York/Chicago comparison versus the first wave/second wave comparison print(scipy.stats.ttest_ind(newyork_concrete_array, chicago_concrete_array)) print(scipy.stats.ttest_ind(firstwave_concrete_array, secondwave_concrete_array)) # # # Analysis 2: Count Organizations and People Mentioned using 
NER # The below code counts the number of persons and organizations mentioned, and compares across organizations. # # **Note:** Because the published data is sorted, and not the full text, the code below will not reproduce the actual named entities counted. Instead, I will read in the saved named entities, count them, and print the output. # + ############################################################################################# ##Don't run this code to reproduce the named entities. It will not work on the sorted text.## ############################################################################################# def extract_entities(text): #text = text.decode('ascii','ignore') #convert all characters to ascii text = re.sub('[\x00-\x1f]'," ", text) org_list = [] person_list = [] for sent in nltk.sent_tokenize(text): chunked = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent))) for n in chunked: if isinstance(n, nltk.tree.Tree): if n.label()=="ORGANIZATION": org_list.append(' '.join(c[0] for c in n.leaves())) if n.label()=="PERSON": person_list.append(' '.join(c[0] for c in n.leaves())) return org_list, person_list org_strings = [hullhouse_string, heterodoxy_string, cwlu_string, redstockings_string] for org in org_strings: org_list, person_list = extract_entities(org) myList = [org_list, person_list] filename="../input_data/named_entities_%s.json" % org ##Uncomment the line below to save the named entities as a JSON file #json.dump( myList_cwlu, open( filename, "w", endoding = 'utf-8' ) ) # + ################################################################## ##Count saved named entities to reproduce named entity analysis.## ################################################################## myList_cwlu = json.load( open ("../input_data/named_entities_cwlu.json", "r", encoding='utf-8') ) cwlu_orgs = myList_cwlu[0] cwlu_person = myList_cwlu[1] myList_hh = json.load( open ("../input_data/named_entities_hullhouse.json", "r", encoding = 'utf-8') ) hh_orgs = myList_hh[0] hh_person = myList_hh[1] myList_red = json.load( open ("../input_data/named_entities_redstockings.json", "r"), encoding='utf-8') red_orgs = myList_red[0] red_person = myList_red[1] myList_heterodoxy = json.load( open ("../input_data/named_entities_heterodoxy.json", "r"), encoding='utf-8') heterodoxy_orgs = myList_heterodoxy[0] heterodoxy_person = myList_heterodoxy[1] # + #plots the number of organizations and persons mentioned by each organization import matplotlib.pyplot as plt # data to plot n_groups = 4 num_persons = (len(hh_person), len(heterodoxy_person), len(cwlu_person), len(red_person)) num_orgs = (len(hh_orgs), len(heterodoxy_orgs), len(cwlu_orgs), len(red_orgs)) # create plot fig, ax = plt.subplots() index = np.arange(n_groups) bar_width = 0.35 opacity = 0.8 counts1 = plt.bar(index, num_persons, bar_width, alpha=opacity, color='b', label='Persons') counts2 = plt.bar(index + bar_width, num_orgs, bar_width, alpha=opacity, color='g', label='Organizations') plt.xlabel('Organization') plt.ylabel('Count of Named Entities') plt.title('Count of Named Entities by Organization') plt.xticks(index + bar_width, ('Hull House', 'Heterodoxy', 'CWLU', 'Redstockings')) plt.legend(loc='upper left') plt.tight_layout() plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + slideshow={"slide_type": "subslide"} class Recognizer: def __init__(self,socket): 
self.socket = socket; self.stop = False; def disconnect(self): self.socket.disconnect() # + slideshow={"slide_type": "subslide"} def train(self): # BASE_DIR = os.path.dirname(os.path.abspath(__file__)) image_dir = os.path.join("./", "images") # face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_profileface.xml') face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml') recognizer = cv2.face.LBPHFaceRecognizer_create() current_id = 0 label_ids = {} y_labels = [] x_train = [] x = True; for root, dirs, files in os.walk(image_dir): # Walking through the dir images recursively print("root:",root,end='\n') print("DIR:",len(dirs),end='\n') print("Files:",len(files)) # For every file in the current walking dir we check if it is png, jpg # and start adding to the lable_id map # sample {'chakri': 0, 'puneeth': 1} print("files length",len(files)) t_detected_faces = 0 count = 0 for file in files: if file.endswith("png") or file.endswith("jpg") or file.endswith("JPG"): #print("file name ",file) path = os.path.join(root, file) label = os.path.basename(root).replace(" ", "-").lower() if not label in label_ids: label_ids[label] = current_id current_id = current_id+1 # take the current id of the person #print("current_id",current_id,"label_ids",label_ids) id_ = label_ids[label] print(id_,end="_*_") # Convert the image into graysacle and resize to 500x500 pil_image = Image.open(path).convert("L") # grayscale size = (300,300) final_image = pil_image.resize(size, Image.ANTIALIAS) # the final image is converted into graysacle and resized to 500x500 # now converted into nummpy array for detecting faces in that image image_array = np.array(final_image, "uint8") # after converting the image into nummpy array for analytics we take that array and # dectect all the faces in the image using Haar Cascade faces = face_cascade.detectMultiScale(image_array, minNeighbors=3) # after detection of the faces t_detected_faces+= len(faces) count+=1 for (x,y,w,h) in faces: roi = image_array[y:y+h, x:x+w] x_train.append(roi) y_labels.append(id_) print("detected faces are ",t_detected_faces) t_detected_faces=0 with open("pickles/face-labels.pickle", 'wb') as f: pickle.dump(label_ids, f) recognizer.train(x_train, np.array(y_labels)) recognizer.save("recognizers/face-trainner.yml") print("COMPLETED TRAINING saved to recognizers/face-trainner.yml") # + slideshow={"slide_type": "subslide"} def recognize(self): print("recognize") face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml') recognizer = cv2.face.LBPHFaceRecognizer_create() recognizer.read("./recognizers/face-trainner.yml") labels = {"person_name": 1} with open("pickles/face-labels.pickle", 'rb') as f: og_labels = pickle.load(f) labels = {v:k for k,v in og_labels.items()} cap = cv2.VideoCapture(0) # cap1 = cv2.VideoCapture(1) # cap = cv2.VideoCapture("rtsp://192.168.0.5:8080/h264_pcm.sdp") while(True): if self.stop: self.socket.disconnect() break # Capture frame-by-frame ret, frame = cap.read() # faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=3) for (x, y, w, h) in faces: roi_gray = gray[y:y+h, x:x+w] #(ycord_start, ycord_end) roi_color = frame[y:y+h, x:x+w] # recognize? we are using har-calsifiers, But using deep learned model # predict keras tensorflow pytorch scikit learn can be used in future work of this system... 
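# Note: LBPHFaceRecognizer's predict() returns the best-matching label id together with a confidence value;
# for LBPH this value is a distance-like measure, so lower numbers mean a closer match.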
id_, conf = recognizer.predict(roi_gray) # with a confidence level between 65% to 100% our system starts pushes the recognized faces to # Web Application using websockets if conf>=65 and conf <= 100: sio.emit('push_faces', {'location': random.choice(locations),'name':labels[id_]}, namespace='/facemap') font = cv2.FONT_HERSHEY_SIMPLEX name = labels[id_] color = (255, 255, 255) stroke = 2 cv2.putText(frame, name, (x,y), font, 1, color, stroke, cv2.LINE_AA) img_item = "7.png" # cv2.imwrite(img_item, roi_color) color = (0, 255, 0) #BGR 0-255 stroke = 2 end_cord_x = x + w end_cord_y = y + h cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color, stroke) # Display the resulting frame frame = cv2.resize(frame, (640, 480), interpolation = cv2.INTER_LINEAR) cv2.imshow('frame',frame) if cv2.waitKey(20) & 0xFF == ord('q'): # self.socket.disconnect() break # When everything done, release resources and capture. cap.release() cv2.destroyAllWindows() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Analysing Data import pandas as pd # reading the dataset all_tweets = pd.read_csv('Machine Learning-tweets.csv') # As I scraped 5000 tweets print(len(all_tweets)) # + # Text, No-of-Retweets, Followers, Friends print(len(all_tweets.columns)) # - print(len(all_tweets.loc[0]['Text'])) print(all_tweets.info()) all_tweets.describe() all_tweets.head() all_tweets.tail() # ### Defining the Tweets as Viral or Not import numpy as np all_tweets['is_viral'] = np.where(all_tweets['No-of-Retweets'] > 1000, 1, 0) # 0 means the tweet is not_viral and 1 mean is is_viral all_tweets['is_viral'].value_counts() # ### Feature Engineering # Now finding the features that we think are relevant for our analysis for whether or not the tweet will go viral. We are considering tweet_length, followers_count, friends_count, hashtag_count, http_count, word_count and average_word_len as our features for training of model. # + # apply() function calls the lambda function and applies it to every row or # column of the dataframe and returns a modified copy of the dataframe: # the axis = 1 means the column all_tweets['tweet_length'] = all_tweets.apply(lambda tweet: len(tweet['Text']), axis=1) # - all_tweets['followers_count'] = all_tweets.apply(lambda tweet: tweet['Followers'], axis=1) all_tweets['friends_count'] = all_tweets.apply(lambda tweet: tweet['Friends'], axis=1) all_tweets['hashtags_count'] = all_tweets.apply(lambda tweet: tweet['Text'].count('#'), axis = 1) all_tweets['http_count'] = all_tweets.apply(lambda tweet: tweet['Text'].count('http'), axis = 1) all_tweets['word_count'] = all_tweets.apply(lambda tweet: len(tweet['Text'].split()), axis = 1) all_tweets['average_word_len'] = all_tweets['tweet_length']*1.0 / all_tweets['word_count'] # ### Normalizing the Data # To normalize the data assign names to it i-e to the features and label. data = all_tweets[['tweet_length', 'hashtags_count', 'followers_count', 'friends_count', 'http_count', 'word_count', 'average_word_len']] label = all_tweets['is_viral'] data.head() label.head() # ### Train and Test Splitting # Not split the data into 80% for training and 20% for testing. 
from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.2) # ### KNN Classifier # Training the model with KNN Classifier from sklearn.neighbors import KNeighborsClassifier # n_neighbors means the number of k clf = KNeighborsClassifier(n_neighbors=5) clf.fit(X_train, y_train) clf.score(X_test, y_test) # ### Optimal K # Experimenting with different values of K. Plot them on a graph to see which value of k gives optimal accuracy. # The under-fitting and over-fitting which can happen on choosing too high or two low k values. # This will plot graph vs accuracy for 100 values of k import seaborn as sn score = [] for k in range(1,100): classifier = KNeighborsClassifier(n_neighbors = k) classifier.fit(X_train, y_train) score.append(classifier.score(X_test, y_test)) sn.lineplot(range(1,100),score) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] colab_type="text" id="D_lKr3Avj0Ag" # # Lens API search for institution and range of years or dates # + [markdown] colab_type="text" id="fyrZq73Yj0Ak" # ## Functions and global variables # + [markdown] colab_type="text" id="T4Rt_kx7j0Al" # TODO: # Orcid search: https://www.lens.org/lens/scholar/search/results?q=&orcids=0000-0001-6455-5564 # + colab_type="code" id="Rw3EV881j0Am" outputId="5eb7399d-c4e2-4d2b-e955-5ef7331e8840" colab={"base_uri": "https://localhost:8080/", "height": 103} import getpass import pandas as pd import requests import sys import time import os if os.getcwd()=='/content': # !pip install unidecode import unidecode # + [markdown] colab_type="text" id="TFUR-R4pj0Ap" # # + colab_type="code" id="Nl8_4Hyoj0Aq" outputId="fede12fd-4645-4fe0-e263-602d4b1c8ed9" colab={"base_uri": "https://localhost:8080/", "height": 34} token=getpass.getpass('token:') # + colab_type="code" id="TwCrMmcRj0Au" colab={} url = 'https://api.lens.org/scholarly/search' size=1000 # + colab_type="code" id="vG_EKs5tj0Ay" colab={} def query(affiliation="University of Antioquia",range_type="date_published", date_ini="2003-01-01", date_end="2004-12-31",size=1000): if range_type=="year_published": date_ini=date_ini.split('-')[0] date_end=date_end.split('-')[0] data='''{ "query": { "bool": { "must": [{ "match_phrase":{ "title": "%s" } }, { "range": { "%s": { "gte": "%s", "lte": "%s" } } } ] } }, "size": %i }''' %(affiliation,range_type,date_ini,date_end,size) return data def query_title(title="Dark Matter",range_type="date_published", date_ini="2003-01-01", date_end="2004-12-31",size=1000): """ WARNING: Accents needs to be removed! """ title=unidecode.unidecode(title) if range_type=="year_published": date_ini=date_ini.split('-')[0] date_end=date_end.split('-')[0] data='''{ "query": { "bool": { "must": [{ "match_phrase":{ "title": "%s" } }, { "range": { "%s": { "gte": "%s", "lte": "%s" } } } ] } }, "size": %i }''' %(title,range_type,date_ini,date_end,size) return data # + [markdown] colab_type="text" id="moXfc414j0BM" # ## Full search: University of Antioquia # Maximum number of responses currently is `size=1000` by web is 50000. However the JSON output for the API seem to be more complete: Includes list of citations! 
no only the number # + colab_type="code" id="aqlXzSCZj0BN" colab={} def get_query(token,data,url='https://api.lens.org/scholarly/search',size=1000): headers = {'Authorization': '{}'.format(token), 'Content-Type': 'application/json'} response = requests.post(url, data=data, headers=headers) if response.status_code != requests.codes.ok: sys.exit(response.status_code) else: return response.text # + colab_type="code" id="yHUIL4RNj0BS" colab={} year_ranges=[["1900","1994"],["1995","2000"],["2001","2003"] ] # + colab_type="code" id="OP3puxdLj0BU" outputId="3db7862d-414d-4689-e96f-f2a089f48ca7" colab={"base_uri": "https://localhost:8080/", "height": 120} udea=pd.DataFrame() for i in range(len(year_ranges)): print(year_ranges[i]) data=query(affiliation="University of Antioquia",range_type="year_published", date_ini=year_ranges[i][0],date_end=year_ranges[i][1],size=1000) r=get_query(token,data) print("query:",pd.read_json(r).shape[0] ) udea=udea.append( pd.read_json(r),sort=True ).reset_index(drop=True) tmp=udea.copy() time.sleep(2) # + colab_type="code" id="8LesJKD1j0BX" colab={} date_ranges=[["2004-01-01","2005-11-30"],["2005-12-01","2006-12-31"],["2007-01-01","2007-12-31"], ["2008-01-01","2008-10-31"],["2008-11-01","2009-05-31"],['2009-06-01','2009-12-31'], ['2010-07-01', '2010-12-31'],['2011-01-01', '2011-05-31'],['2011-06-01', '2011-12-31'], ['2012-01-01', '2012-05-31'],['2012-06-01', '2012-11-30'],['2012-12-01', '2013-03-31'], ['2013-04-01', '2013-10-31'],['2013-11-01', '2014-02-28'],['2014-03-01', '2014-10-31'], ['2014-11-01', '2015-02-28'],['2015-03-01', '2015-10-31'],['2015-11-01', '2016-03-31'], ['2016-04-01', '2016-09-30'],['2016-10-01', '2016-12-31'],['2017-01-01', '2017-06-30'], ['2017-07-01', '2017-12-31'],['2018-01-01', '2018-08-31'],['2018-09-01', '2019-06-30'] ] # + colab_type="code" id="VhzeBRPDj0Bc" colab={} FIND_DATES=False if FIND_DATES: m=["","01-31","02-28","03-31","04-30","05-31","06-30","07-31","08-31","09-30","10-31","11-30","12-31"] dr=date_ranges[0][1].split('-') year="2019-" month=6 if int(dr[1])<12: date_ranges=[[dr[0]+'-'+m[int(dr[1])+1].split('-')[0]+'-'+'01',year+m[month]]] else: date_ranges=[[str(int(dr[0])+1)+'-'+'01'+'-'+'01',year+m[month]]] print( date_ranges ) # + id="LelhGVCRDkhi" colab_type="code" colab={} 1+1 # + id="KDpSIDSYDUaX" colab_type="code" colab={} data=query(affiliation="University of Antioquia",range_type="date_published", date_ini='2018-01-01',date_end='2018-08-31',size=1000) r=get_query(token,data) # + colab_type="code" id="Fbex4g0aj0Be" outputId="c17ab0f4-b2bd-4361-f70d-c391d3478725" colab={"base_uri": "https://localhost:8080/", "height": 974} #WARNING: Failed after some i-th request: fix initial range to continue for i in range(0,len(date_ranges)): print(date_ranges[i]) data=query(affiliation="University of Antioquia",range_type="date_published", date_ini=date_ranges[i][0],date_end=date_ranges[i][1],size=1000) r=get_query(token,data) kk=pd.read_json(r) print("query:",i,kk.shape[0] ) if FIND_DATES: if kk.shape[0]>=1000 and month>1: month=month-1 date_ranges=[[date_ranges[0][0],year+m[month]]] udea=udea.append( kk , sort=True ).reset_index(drop=True) tmp=udea.copy() time.sleep(5) # + colab_type="code" id="aE9AgET5j0Bg" colab={} udea=pd.DataFrame( list( udea.data.values ) ).reset_index(drop=True) # + colab_type="code" id="OrD67YO5j0Bi" outputId="b1453625-cb49-404c-b722-961516180890" colab={"base_uri": "https://localhost:8080/", "height": 34} udea.shape # + colab_type="code" id="v4Ve3oPYj0Bj" colab={} 
udea.to_json('data/udea.json.gz',compression='gzip') # + [markdown] colab_type="text" id="YlSjAp7ej0Bl" # ## Analysis # + colab_type="code" id="W9KMHUa7j0Bm" outputId="b226044c-d296-47e3-f73f-69bdd8174ada" colab={"base_uri": "https://localhost:8080/", "height": 35} # %pylab inline # + colab_type="code" id="g3SlZp7bj0Bo" colab={} import pandas as pd # + colab_type="code" id="iyzMs6BXj0Bq" outputId="de1fbbe7-ed6e-4b58-ac57-faed0933e343" colab={} # ls data/ # + colab_type="code" id="D10s9_5Lj0Bs" colab={} udea=pd.read_json( 'https://github.com/restrepo/lensapi/raw/master/data/udea.json.gz',compression='gzip')#.reset_index(drop=True) ) # + colab_type="code" id="id4qduLVj0Bu" colab={} #udea=pd.read_json('data/lens_udea_lines_True_orient_records.json.gz',lines=True,orient='records',compression='gzip')#.reset_index(drop=True) # + colab_type="code" id="gIYvyYmLj0Bw" outputId="00598ea8-bfa6-41a1-c657-8b9460a14c99" colab={"base_uri": "https://localhost:8080/", "height": 35} udea.shape # + id="LgCBtgFPdSv0" colab_type="code" outputId="bd4b4782-5a26-4a54-9607-caf65b7c7ab9" colab={"base_uri": "https://localhost:8080/", "height": 318} udea[:2] # + [markdown] colab_type="text" id="-1CuTsL4j0By" # ### Entries with publication date # + colab_type="code" id="ZYJgNBiCj0Bz" outputId="594961b6-ba2d-4a8f-bac5-842ba3493208" colab={"base_uri": "https://localhost:8080/", "height": 235} udea.date_published # + colab_type="code" id="MFkDDv0sj0B1" colab={} udeay=udea[~udea.date_published.isna()] # + colab_type="code" id="Z8SKbQu6j0B3" colab={} udeay['Year']=udea.date_published.apply(lambda s: s.split('-')[0] if type(s)==str else s.get('date').split('-')[0] ) # + colab_type="code" id="wtoo_h1ej0B6" colab={} udeay=udeay[~udeay['Year'].isna()].reset_index(drop=True) udeay['Year']=udeay['Year'].astype(int) # + colab_type="code" id="NNrTL5B0j0B8" outputId="b8b6dec2-6dd2-4648-e8c4-4f9d9651636b" colab={} udeay[udeay['Year']>2007]['Year'].value_counts().sort_index().plot(kind='bar') # + colab_type="code" id="_Gc7Iplyj0B-" outputId="58725c45-560c-492f-f874-14941777ee24" colab={} udeay.shape # + colab_type="code" id="UtmESdNlj0CA" colab={} udeay['doi']=udeay['external_ids'].apply(lambda l:[d.get('value') for d in l if d.get('type')=='doi'] if type(l)==list else None) udeay['doi']=udeay['doi'].apply(lambda l: l[0] if type(l)==list and len(l)>0 else None) # + colab_type="code" id="rUZKbJR4j0CB" colab={} udea_doi=udeay[~udeay['doi'].isna()] # + colab_type="code" id="DTN2anF3j0CD" outputId="08958946-40ca-47e0-8e53-0966067840b9" colab={} udea_doi.shape # + colab_type="code" id="ISOl0XUDj0CG" outputId="3c870ffd-6b12-487b-b3b1-d20b95c42d73" colab={} udea_doi[udea_doi['Year']>2007]['Year'].value_counts().sort_index().plot(kind='bar') # + colab_type="code" id="LlSIap7Ej0CJ" outputId="16d8e8fa-9167-4d66-9139-96654732f789" colab={} minyear=2007 totx=list( udeay[udeay['Year']>minyear]['Year'].value_counts().sort_index().index ) toty=list( udeay[udeay['Year']>minyear]['Year'].value_counts().sort_index().values ) doix=list( udea_doi[udea_doi['Year']>minyear]['Year'].value_counts().sort_index().index ) doiy=list( udea_doi[udea_doi['Year']>minyear]['Year'].value_counts().sort_index().values ) plt.plot(totx,toty,label='TOTAL') plt.plot(totx,toty,'bo') plt.plot(doix,doiy,'r',label='DOI') plt.plot(doix,doiy,'ro') plt.legend(loc='best') plt.xlim(2008,2018.1) plt.title('Producción UDEA') plt.ylabel('Número de artículos cada año',size=15) plt.savefig('totdoi.pdf') # + colab_type="code" id="2kIwBP6pj0CK" 
outputId="f34f2c90-6560-430d-b6da-2a67c5e8bfdf" colab={} # ls /scratch/restrepo/HUGE/JSON/ # + colab_type="code" id="AspQFy_1j0CM" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Star Trek Causes of Death # ## Data and inspiration from www.thestartrekproject.net # # ## Required Libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # ## Read in the Data allDeaths = pd.read_excel("data/all-deaths.xls") print(allDeaths.shape) allDeaths.head() # ## Filter to Just Explore "The Original Star Trek" allDeathsTOS = allDeaths[allDeaths['EpisodeID'].str.contains("tos")] print(allDeathsTOS.size) # ## Group By Cause of Death and Sum the Body Count # + totals = allDeathsTOS.groupby('DeathBy')['BodyCount'].sum() #returned a serires, so build a data frame and then sort in ascending order for plotting later totalDeaths = pd.DataFrame({'DeathBy': totals.index,'TotalBodyCount': totals.values}).sort_values('TotalBodyCount') totalDeaths.tail() # - # ## Build a Spiral Bubble Plot # The concept for this chart is borrowed from http://thestartrekproject.net/files/Star_Trek/ch4/miscellanea-chapter-mockup%2012.pdf # + from bokeh.plotting import figure, output_notebook, show, ColumnDataSource from bokeh.models import HoverTool output_notebook() # spiral parameters a = 0.45 b = 0.15 # bubble size and spacing spacing = 0.01 size=np.log10(1.0+totalDeaths['TotalBodyCount']) # convert bubble size and spacing to arclengths arclength = np.cumsum(2*size+spacing) # solve for polar angle using spiral arclength equation theta = np.log(b*arclength/(a*np.sqrt(1+np.power(b,2))))/b # solve for polar radius using logrithmic spiral equation r = a*np.exp(b*theta) # cartesian x=r * np.cos(theta) y=r * np.sin(theta) # build column data source for bokeh source = ColumnDataSource( data=dict( x=x, y=y, bodyCount=totalDeaths['TotalBodyCount'], size=size, color=["#%02x%02x%02x" % (int(red), int(green), 150) for red, green in zip(np.floor(100+2*x), np.floor(30+2*y))], desc=totalDeaths['DeathBy'].tolist(), ) ) # setup hover tool for contextual labels hover = HoverTool( tooltips=[ ("Body Count", "@bodyCount"), ("Desc", "@desc"), ] ) # create the figure p = figure(plot_width=800, plot_height=800, tools=[hover], title="Death By") # create the bubble scatter plot p.scatter('x', 'y', radius='size', fill_color='color', source=source, fill_alpha=0.8, line_color=None) # display the figure show(p) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Handout 3 (FYD600/GU): Reinforcement learning; one-step Q-learning # ### Authors: , , and # # (2019, revised 2020) # In this project we will look at reinforcement learning (RL), which is a machine learning method that deals with problems where we are trying to find an optimal way of maneuvering in an environment that can give rewards or penalties depending on our actions. Examples are learning to play board games (https://www.youtube.com/watch?v=TnUYcTuZJpM) or computer games (https://youtu.be/cUTMhmVh1qs) or having a robot learn to walk by trial and error (https://www.youtube.com/watch?v=gn4nRCC9TwQ). 
# # There is a well developed body of theory around these types of problems (see Sutton and Barto book), but in the spirit of this course we will concentrate on a small part of the topic and dig into that with a practical excercise. In particular we will focus on the RL method known as "one-step Q-learning", as it is conceptually quite simple (but powerful) and often used in practice. # # This handout (3) is quite seperate from the supervised learning using artificial neural networks that we studied in handout 1 and 2. But in the last handout (4) we will make the connection by using, so called, deep Q-learning, where an ANN is used. (More on that later.) # ### Basic definitions and theory. # # We consider a type of game where an "agent" takes at each time step an "action" $a$, for example taking a step in some direction or placing a stone on a Go board. Before taking the action, the system or environment that the agent lives in is in some "state" $s$. (I.e. for a game of Go, the state is the configuration of stones on the board.) After the action the environment responds in some way (the agents stone is placed and the opponent places a stone) resulting in a new state of the system $s'$. In the general case the transition $s\rightarrow s'$ would result in some reward $r$ given to the agent (negative or positive or zero). (For the Go game the reward maybe only given at the end, positive if the game is won, negative if it's lost. This is a problem with so called sparse-reward, which is particularly difficult.) # # The objective of the game is to get to the end of the game with the the highest "return", $R$ where return is the cumulative reward. When calculating the return it is customary (and important for many problems) to use a discounting factor $0<\gamma\leq 1$, that implies that reward further on is less valuable than immediate reward. With this, the return at time $t$ is defined as $R_t=\sum_{t'=t}^\infty\gamma^{t'-t}r_{t'}$, where there is an implicit assumption that we are following some particular sequence of actions with the corresponding rewards. # # To decide what action $a$ to take in a given state $s$ the agent uses a policy $\pi(s,a)$, which could correspond to a unique choice or a probability distribution. We are interested in finding the "optimal" policy $\pi_{opt}$, that in any state $s$ will give the maximum return when following it to the end. # # A useful object to quantify the whole proceedure is the "action-value" function $Q(s,a)$. It is defined as the return given by taking $a$ in $s$ and subsequently following the optimal policy, which can be written in an iterative form $Q(s,a)=r+\gamma\max_{a'}Q(s',a')$. Thus, correspondingly, the optimal policy will in fact be the one that in each step uses the action that maximizes $Q$, i.e. to find $\pi_{opt}$ we should calculate $Q$. # # To calculate $Q$ we need to explore the set of states, taking different actions, and collect rewards. There are different ways of doing this, but a standard way is to do "one-step Q-learning" using an $\epsilon$-greedy policy. It goes as follows: # # $\bullet$ in state $s$ use your current estimate of $Q(s,a)$ to decide on a best action $a$ (maximizing $Q$). Use this action with probability $1-\epsilon$, but with probability $\epsilon$ take a random action. $0\leq\epsilon\leq 1$ is a parameter that quantifies a trade off between exploiting the current best estimate of the optimal policy and to explore the full space of possible rewards. 
(For problems with very big state-action space it is not possible to explore the full space, thus necessary to use a small $\epsilon$, while for small worlds one can even use $\epsilon=1$.) # # $\bullet$ feed action $a$ to the environmment that will respond by moving to state $s'$ and giving reward $r$. # # $\bullet$ this last move provides an estimate # $r+\gamma\max_{a'}Q(s',a')$ for $Q(s,a)$. (Note that this is not necessarily the correct value for $Q(s,a)$ as the next time you take action $a$ in state $s$ you may get a different reward, $Q(s',a')$ may have changed, and you may even end up in a different state $s'$ if there is some stochastic ingredient in moving between states.) Increment your value (estimate) by mixing in some part ($\alpha<1$) of the most recent estimate # $Q(s,a)\leftarrow(1-\alpha)Q(s,a)+\alpha(r+\gamma\max_{a'}Q(s',a'))$. (This is what "one-step" implies; we could have explored further, weighing the rewards of several steps into the upgrade of $Q$.) # # $\bullet$ keep playing,from state $s'$ take another $\epsilon$-greedy action $a'$ to state $s''$ and update $Q(s',a')$. (Restart the game if you reach a terminal state.) Unless you get stuck in some part of the state-action space you will converge to the correct $Q$ function and the corresponding optimal policy after many games. # # Assignment # Construct an RL agent using one-step Q-learning to find the optimal path from start to finish in a "grid world". The grid world is a rectangular board where the agent can move in the cardinal directions. The board is surrounded by sheer cliffs giving death and a large negative reward. There are also "fires" on the board, that are not fatal but gives a large negative reward. In addition, to make things more intersting, there is a treacherous "wind" that can make you go in a completely unintended direction. Every step gives a negative reward; you want to get to goal in as few steps as possible. Reaching goal gives the only positive reward available on the board. # # Some basic functions and definitions are provided. But you need to set up the training and test it by tuning parameters. # Here we will give you some help with constructing the code. We will provide you with a template, but you do not have to use it. You should find the most suitable parameters yourself. However, use wind=0.1, the probability of taking a random step, unless otherwise specified. import numpy as np import matplotlib import matplotlib.pyplot as plt from mpl_toolkits import mplot3d # Define parameters (you should find suitable values) alpha= 0.5 #learning rate gamma= 0.3 #discounting rate epsilon= 0.3 #for the epsilon-greedy policy wind=0.0 # chance that action results in random move # + # Setup grid startpos=np.array([0,19]) goalpos=np.array([10,0]) size_grid_x=30 size_grid_y=50 # Setup Q matrix (size,size,4) matrix for the 4 actions up,down,left,right=0,1,2,3 Q=np.zeros((size_grid_x,size_grid_y,4)) # - # The grid will consist of squares of "fire" and a "goal". The edge of the grid will be considered a "cliff". The game ends if you end up at the goal, which will yield a reward of 100. The game also ends if you fall over the cliff resulting in a -100 reward. You can walk through fire, but it yields a negative reward of -50 for each step. Every other step yields a -1 reward, meaning that we want to find the shortest path to the goal. # # It is convenient to use python's dictionaries to associate the different squares of the grid with the values specified above. 
# The grid will consist of squares of "fire" and a "goal". The edge of the grid is considered a "cliff". The game ends if you reach the goal, which yields a reward of 100. The game also ends if you fall over the cliff, resulting in a -100 reward. You can walk through fire, but it yields a negative reward of -50 for each step. Every other step yields a -1 reward, meaning that we want to find the shortest path to the goal.
#
# It is convenient to use Python's dictionaries to associate the different squares of the grid with the values specified above. A dictionary maps a value to a key (label) that you specify. This should be clear from the following definitions.

# the reward associated with each obstacle
reward_list = {'fire': -50, 'cliff': -100, 'goal': 100, 'start': -1, '-': -1}
# the state of the game: 1 meaning the game has ended
state_list = {'fire': -1, 'cliff': 1, 'goal': 1, 'start': -1, '-': -1}

# from which you can extract the values according to (note the [ ] brackets)
reward_list['fire']

# Next we define a function that constructs the grid (it will be a nested list, not a numpy array).

# Make grid
def make_grid():
    # Add padding: an extra row and column, accessible through index -1
    grid = [['-' for y in range(size_grid_y+1)] for x in range(size_grid_x+1)]
    for x in range(size_grid_x+1):
        for y in range(size_grid_y+1):
            # goal
            if x == goalpos[0] and y == goalpos[1]:
                # store object
                grid[x][y] = 'goal'
            # cliff
            if x == size_grid_x or y == size_grid_y:
                grid[x][y] = 'cliff'
            # fires
            elif (3 <= x <= 7) and (3 <= y <= 7):
                grid[x][y] = 'fire'
            elif x == startpos[0] and y == startpos[1]:
                grid[x][y] = 'start'
    return grid

# Let us make a function that can plot this grid in a nice way.

# Plot grid; pass path=None if you do not want to draw a path
def dispGrid(grid, path=None):
    grid_RGB = np.ones((size_grid_x, size_grid_y, 3))*0.7
    # (drawing the path is left for you to implement; see the Questions below)
    for x in range(size_grid_x):
        for y in range(size_grid_y):
            if grid[x][y] == 'goal':
                grid_RGB[x, y, :] = np.array([245/255, 237/255, 48/255])  # yellow
            if grid[x][y] == 'fire':
                grid_RGB[x, y, :] = np.array([203/255, 32/255, 40/255])   # red
            if grid[x][y] == 'start':
                grid_RGB[x, y, :] = np.array([0/255, 254/255, 0/255])     # green
    return grid_RGB

# +
# make the grid
grid = make_grid()
# convert it to colors
plot_grid = dispGrid(grid)

fig = plt.figure(figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
# We have to invert the x and y axes; go over to a numpy array instead
plt.imshow(np.swapaxes(np.array(plot_grid), 0, 1))
#plt.axis('on')
plt.gca().invert_yaxis()
plt.xticks(np.arange(0, size_grid_x, dtype=int))
plt.yticks(np.arange(0, size_grid_y, dtype=int))
plt.show()
# -

# It helps to write a function action_fcn(q_in, epsilon, wind) which takes q_in, epsilon, wind as input and gives move, action as output.
#
# The variable q_in holds the values of the four different actions 0,1,2,3 (corresponding to up, down, left, right) at a particular position of the $Q$-matrix, i.e. it is an array with four entries. The output action should be the action taken according to the epsilon-greedy procedure, i.e. 0, 1, 2 or 3. However, we also include wind, which means that the actual move taken by the player has a probability wind of being just a random move in any direction. Let move be the actual move performed when wind is taken into account.
#

# +
import random

def action_fcn(q_in, epsilon, wind):
    # Find the action according to epsilon-greedy
    if random.random() < epsilon:
        action = random.randint(0, 3)
    else:
        action = np.argmax(q_in)

    # With "wind" there is a chance that the action gives a different reaction, i.e. move.
    # Construct move with a probability (set by the parameter wind) of being just a random step
    if random.random() < wind:
        move = random.randint(0, 3)
    else:
        move = action

    # Output
    return move, action
# -

# Now it is time to construct the main loop of the program. Loop through a big number of games, try 100000. Run each game until you go outside the grid (the 'cliff') or reach the goal.
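# One small optional helper (not part of the provided template; the name step_of_move is invented here) that can make the loop below and the later path extraction less error-prone: a mapping from a move index to a coordinate step, matching the up/down/left/right convention used in the template.

# +
# hypothetical helper: maps a move index to an (x, y) step on the grid
step_of_move = {up:    np.array([0, 1]),    # up    -> y + 1
                down:  np.array([0, -1]),   # down  -> y - 1
                left:  np.array([-1, 0]),   # left  -> x - 1
                right: np.array([1, 0])}    # right -> x + 1

# usage: position = position + step_of_move[move]
# -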
# +
up, down, left, right = 0, 1, 2, 3
tot_goals = 0

# loop over games
for games in range(10):  # 100000

    # Initialize the player at the start position and store the current position in position
    position = startpos.copy()
    state = state_list[str(grid[position[0]][position[1]])]

    # loop over steps taken by the player
    while state == -1:  # the state of the game is -1, terminate if 1 (see state_list above)

        # Find out what move to make
        old_location = position.copy()
        # print(Q[position[0]][position[1]])  # uncomment to inspect Q values while debugging
        move, action = action_fcn(Q[position[0]][position[1]], epsilon, wind)

        # update location
        if move == up:
            position[1] += 1
        elif move == down:
            position[1] -= 1
        elif move == left:
            position[0] -= 1
        elif move == right:
            position[0] += 1

        # Update Q. Remember that it is the variable action that determines which entry of the
        # Q-matrix should be updated, not move.
        state = state_list[str(grid[position[0]][position[1]])]
        r = reward_list[str(grid[position[0]][position[1]])]
        if state == -1:
            # non-terminal step: bootstrap with the value of the new position
            target = r + gamma * max(Q[position[0], position[1], :])
        else:
            # terminal step (goal or cliff): there is no future value to bootstrap from
            target = r
        Q[old_location[0], old_location[1], action] = \
            (1 - alpha) * Q[old_location[0], old_location[1], action] + alpha * target

        if str(grid[position[0]][position[1]]) == 'goal':
            print('Awesome, game: ' + str(games))
            tot_goals += 1

print('Total: ' + str(tot_goals))
# -

# This is the basic structure of the Q-learning. You should now have constructed a $Q$ matrix which stores the value of taking each of the four actions in every state.

# ## Questions:
# Answer the following questions:
#
# $\bullet$ Construct the Q-learning algorithm according to the above. Find suitable values for alpha, gamma, epsilon (see the small evaluation sketch after this list). Observations?
#
# $\bullet$ There is a fundamental difference between the two random components $\epsilon$ and wind. What is it?
#
# $\bullet$ Experiment and discuss the difference between using an $\epsilon$ close to 0 or close to 1.
#
# $\bullet$ The $Q$-matrix contains all information about the optimal way to play this game. Find the optimal path to move according to the $Q$-matrix, i.e. in every step make the move with the highest value. Plot the optimal path on the grid by including it in the dispGrid(grid) function. What happens if you set wind=0 during training, and why?
#
# $\bullet$ Plot the state-value function $V(s)$. The state-value function is the largest Q-value over the actions for each state, i.e. $V(s) = \max_a Q(s,a)$. To plot this function, use plt.imshow from Matplotlib. What does this object signify? Compare it to the plot given above. Why is the "fire" smaller in this picture?
#
#
# $\bullet$ Run the subsequent routines to diagnose the outcome, and discuss the results.
#
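# Before the diagnosis routines, here is a small optional sketch (not part of the template) that can help with the first question above: it plays a number of purely greedy games ($\epsilon=0$, same wind as during training) and reports how often the goal is reached and the average undiscounted return, which is one way to compare different choices of alpha, gamma and epsilon. It assumes the objects defined above (Q, grid, action_fcn, reward_list, state_list, startpos).

# +
moves_xy = [(0, 1), (0, -1), (-1, 0), (1, 0)]   # up, down, left, right, as in the main loop

def evaluate_greedy(n_games=100, max_steps=500):
    goals, returns = 0, []
    for _ in range(n_games):
        position = startpos.copy()
        total_reward = 0
        for _ in range(max_steps):
            move, _ = action_fcn(Q[position[0], position[1]], 0.0, wind)  # greedy, keep the wind
            position = position + np.array(moves_xy[move])
            square = grid[position[0]][position[1]]
            total_reward += reward_list[square]
            if state_list[square] == 1:          # goal or cliff: game over
                goals += (square == 'goal')
                break
        returns.append(total_reward)
    print('goal reached in', goals, 'of', n_games, 'games; mean return:', np.mean(returns))

evaluate_greedy()
# -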
# ## Diagnosis and plot routines

# What does it show?
z = Q.max(axis=2)
#z = z.T
#z = np.flip(z, axis=0)
#z = np.flip(z, axis=1)
plt.figure()
fig = plt.figure(figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
plt.imshow(np.swapaxes(z, 0, 1))
plt.colorbar()
#plt.grid(False)
plt.gca().invert_yaxis()
plt.xticks(np.arange(0, size_grid_x, dtype=int))
plt.yticks(np.arange(0, size_grid_y, dtype=int))
plt.show()

# +
# a path
location = startpos
path = np.array([startpos])
reaction = 0
step_count = 0
while reaction != 1:
    q_state = Q[location[0], location[1], :]
    step_count += 1
    move, action = action_fcn(q_state, 0.0, 0.0)  # take the optimal move
    dxdy = [(0, 1), (0, -1), (-1, 0), (1, 0)][move]  # up, down, left, right
    location_new = location + np.array(dxdy)
    obstacle = grid[location_new[0]][location_new[1]]
    reaction = state_list[obstacle]
    location = location_new
    path = np.append(path, [location], axis=0)
    if step_count > 100:
        break
print(path, step_count)

# +
# plot the above path
grid = make_grid()
plot_grid = dispGrid(grid, path)

fig = plt.figure(figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
# We have to invert the x and y axes; go over to a numpy array instead
plt.imshow(np.swapaxes(np.array(plot_grid), 0, 1))
#plt.axis('on')
plt.gca().invert_yaxis()
plt.xticks(np.arange(0, size_grid_x, dtype=int))
plt.yticks(np.arange(0, size_grid_y, dtype=int))
plt.show()
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
sys.path.insert(0, "../")
import pyErrorPred
import numpy as np

# +
# Get filenames of the original pdb and the predictions
predname = '/projects/ml/for/docking/output_pdbs/longxing_HEEH_14976_000000014_0001_0001.npz'
pdbname = '/projects/ml/for/docking/pdbs/longxing_HEEH_14976_000000014_0001_0001.pdb'

# Load prediction
pred = np.load(predname)

# Get inter- and intra-interaction masks
# Returns the interface map, chainA map, and chainB map
imap, [map1, map2] = pyErrorPred.get_interaction_map(pdbname)

# +
# Good for analyzing a monomer
global_lddt = np.mean(pyErrorPred.get_lddt(pred["estogram"], pred["mask"]))

# Good for analyzing binder + target
interface_lddt = np.mean(pyErrorPred.get_lddt(pred["estogram"], np.multiply(imap, pred["mask"])))
chainA_lddt = np.mean(pyErrorPred.get_lddt(pred["estogram"], np.multiply(map1, pred["mask"])))
chainB_lddt = np.mean(pyErrorPred.get_lddt(pred["estogram"], np.multiply(map2, pred["mask"])))

# Best metric for binder + target (with chainA as the binder, of course)
score = interface_lddt + chainA_lddt
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Background
#
# > In today's deep learning world, if data is king, then making sure it is in the right format may only be the queen, but it is still very important. After the effort of collecting images and annotating all the objects, you have to decide which format to use to store all that information. Compared with everything else you have to worry about, this may not seem like a major decision, but if you want to quickly see how different models perform on your data, getting this step right is crucial.
#
# > Back in 2014, Microsoft created a dataset called COCO (Common Objects in COntext) to help advance research in object recognition and scene understanding. COCO was one of the first large datasets to annotate objects with multiple bounding boxes, so it became a popular benchmark for testing new detection models. The format COCO uses to store annotations has since become a de facto standard, and if you can convert your dataset to its style, a whole set of state-of-the-art model implementations opens up.
#
# > This is where pycococreator comes in. pycococreator takes care of all the annotation formatting details and will help convert your data into the COCO format.

# ## Data structure
#
#     shapes
#     │
#     └───train
#         │
#         └───annotations
#         │   │   __.png
#         │   │   ...
#         │
#         └───
#             │   .jpeg
#             │   ...
# # # # ## File content structure # # ### file name : instances_shapes_validate2018.json # # { # "info": { # "description": "Example Dataset", # "url": "https://github.com/waspinator/pycococreator", # "version": "0.1.0", # "year": 2018, # "contributor": "waspinator", # "date_created": "2018-04-21 05:49:53.923111" # }, # "licenses": [ # { # "id": 1, # "name": "Attribution-NonCommercial-ShareAlike License", # "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/" # } # ], # "categories": [ # { # "id": 1, # "name": "square", # "supercategory": "shape" # }, # { # "id": 2, # "name": "circle", # "supercategory": "shape" # }, # { # "id": 3, # "name": "triangle", # "supercategory": "shape" # } # ], # "images": [ # { # "id": 1, # "file_name": "1038.jpeg", # "width": 512, # "height": 512, # "date_captured": "2018-04-21 05:49:53.922945", # "license": 1, # "coco_url": "", # "flickr_url": "" # } # ], # "annotations": [ # { # "id": 1, # "image_id": 1, # "category_id": 3, # "iscrowd": 0, # "area": 9139, # "bbox": [ # 0.0, # 64.0, # 133.0, # 127.0 # ], # "segmentation": [ # [ # 132.0, # 190.5, # 0, # 190.0, # 0, # 169.0, # 60.0, # 63.5, # 132.0, # 190.5 # ] # ], # "width": 512, # "height": 512 # } # ] # } # # ## references and resources # #

# - pycococreator: https://github.com/waspinator/pycococreator
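# To connect the JSON layout above to code, here is a minimal, hand-rolled sketch (it does not use pycococreator itself) that assembles one image entry and one annotation entry into a COCO-style dictionary and writes it to disk. The concrete ids, sizes and coordinates are just the example values shown above.

# +
import json

coco_output = {
    "info": {"description": "Example Dataset", "version": "0.1.0", "year": 2018},
    "licenses": [{"id": 1, "name": "Attribution-NonCommercial-ShareAlike License",
                  "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"}],
    "categories": [{"id": 1, "name": "square", "supercategory": "shape"}],
    "images": [],
    "annotations": [],
}

# one image entry (ids and sizes taken from the example above)
coco_output["images"].append(
    {"id": 1, "file_name": "1038.jpeg", "width": 512, "height": 512, "license": 1})

# one annotation entry: bbox is [x, y, width, height]; segmentation is a flat list of x,y pairs
coco_output["annotations"].append(
    {"id": 1, "image_id": 1, "category_id": 1, "iscrowd": 0, "area": 9139,
     "bbox": [0.0, 64.0, 133.0, 127.0],
     "segmentation": [[132.0, 190.5, 0, 190.0, 0, 169.0, 60.0, 63.5, 132.0, 190.5]]})

with open("instances_shapes_validate2018.json", "w") as f:
    json.dump(coco_output, f, indent=4)
# -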

# # # # # # # + import cv2 import numpy as np import math import matplotlib.pyplot as plt # %matplotlib inline fig=plt.figure(figsize=(18, 18), dpi= 180, facecolor='w', edgecolor='k') class Hog_descriptor(): def __init__(self, img, cell_size=16, bin_size=8): self.img = img self.img = np.sqrt(img // float(np.max(img))) self.img = self.img * 255 self.cell_size = cell_size self.bin_size = bin_size self.angle_unit = 360 // self.bin_size assert type(self.bin_size) == int, "bin_size should be integer," assert type(self.cell_size) == int, "cell_size should be integer," #assert type(self.angle_unit) == int, "bin_size should be divisible by 360" def extract(self): height, width = self.img.shape gradient_magnitude, gradient_angle = self.global_gradient() gradient_magnitude = abs(gradient_magnitude) cell_gradient_vector = np.zeros((height // self.cell_size, width // self.cell_size, self.bin_size)) for i in range(cell_gradient_vector.shape[0]): for j in range(cell_gradient_vector.shape[1]): cell_magnitude = gradient_magnitude[i * self.cell_size:(i + 1) * self.cell_size, j * self.cell_size:(j + 1) * self.cell_size] cell_angle = gradient_angle[i * self.cell_size:(i + 1) * self.cell_size, j * self.cell_size:(j + 1) * self.cell_size] cell_gradient_vector[i][j] = self.cell_gradient(cell_magnitude, cell_angle) hog_image = self.render_gradient(np.zeros([height, width]), cell_gradient_vector) hog_vector = [] for i in range(cell_gradient_vector.shape[0] - 1): for j in range(cell_gradient_vector.shape[1] - 1): block_vector = [] block_vector.extend(cell_gradient_vector[i][j]) block_vector.extend(cell_gradient_vector[i][j + 1]) block_vector.extend(cell_gradient_vector[i + 1][j]) block_vector.extend(cell_gradient_vector[i + 1][j + 1]) mag = lambda vector: math.sqrt(sum(i ** 2 for i in vector)) magnitude = mag(block_vector) if magnitude != 0: normalize = lambda block_vector, magnitude: [element / magnitude for element in block_vector] block_vector = normalize(block_vector, magnitude) hog_vector.append(block_vector) return hog_vector, hog_image def global_gradient(self): gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5) gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5) gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0) gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True) return gradient_magnitude, gradient_angle def cell_gradient(self, cell_magnitude, cell_angle): orientation_centers = [0] * self.bin_size for i in range(cell_magnitude.shape[0]): for j in range(cell_magnitude.shape[1]): gradient_strength = cell_magnitude[i][j] gradient_angle = cell_angle[i][j] min_angle, max_angle, mod = self.get_closest_bins(gradient_angle) orientation_centers[min_angle] += (gradient_strength * (1 - (mod / self.angle_unit))) orientation_centers[max_angle] += (gradient_strength * (mod / self.angle_unit)) return orientation_centers def get_closest_bins(self, gradient_angle): idx = int(gradient_angle / self.angle_unit) mod = gradient_angle % self.angle_unit if idx == self.bin_size: return idx - 1, (idx) % self.bin_size, mod return idx, (idx + 1) % self.bin_size, mod def render_gradient(self, image, cell_gradient): cell_width = self.cell_size / 2 max_mag = np.array(cell_gradient).max() for x in range(cell_gradient.shape[0]): for y in range(cell_gradient.shape[1]): cell_grad = cell_gradient[x][y] cell_grad /= max_mag angle = 0 angle_gap = self.angle_unit for magnitude in cell_grad: angle_radian = math.radians(angle) x1 = int(x * 
self.cell_size + magnitude * cell_width * math.cos(angle_radian)) y1 = int(y * self.cell_size + magnitude * cell_width * math.sin(angle_radian)) x2 = int(x * self.cell_size - magnitude * cell_width * math.cos(angle_radian)) y2 = int(y * self.cell_size - magnitude * cell_width * math.sin(angle_radian)) cv2.line(image, (y1, x1), (y2, x2), int(255 * math.sqrt(magnitude))) angle += angle_gap return image img = cv2.imread('../test.png', cv2.IMREAD_GRAYSCALE) hog = Hog_descriptor(img, cell_size=8, bin_size=8) vector, image = hog.extract() plt.imshow(image, cmap=plt.cm.gray) plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Bond Graph Tools Tutorial 1 # # ## Part 1: Installing BondGraphTools # # ### Setup: # Install BondGraphTools v0.3 from PyPI using `pip install BondGraphTools` # ### Import: # Import the BondGraphTools package. Initially, we will be using the new, draw and simulate functions. # # This may take a while on first import, as it will download and build the required Julia libraries. from BondGraphTools import new, draw, simulate # ## Part 2: Basic modelling # # We're going to build and simulate a simple resistor-capacitor system. # # All models in BondGraphTools are 'components', and components are created with `new`. # Create a new model and name it 'RC' using the following command: model = new(name='RC') # Let us also make a new capacitor with a capacitance of 1 Farad, a 1 Ohm resistor, and a conservation law which we want them to obey (ie; a common voltage). C = new("C", value=1) R = new("R", value=1) KCL = new("0") # We need to now add the components to our model, and then to wire them up. from BondGraphTools import add, connect add(model, R,C,KCL) connect(R,KCL) connect(C,KCL) # We can display a basic visualisation of the model using the draw command. fig = draw(model) import matplotlib.pyplot as plt plt.savefig("RC_1.svg", pad_inches=0, bbox_inches="tight") # In order to run a simulation, we need to know what the state variables of the system are, so as to select the initial conditions. # # We can find this out via: # # (Note that 'q_i' corresponds to position-like variables, and 'p_i' correspond to momentum-like variables) model.state_vars # Here we clearly have one state variable `x_0` which corresponds to the accumulated charge. # # Hence, our state space is one dimensional. # Let us define intial values and an simulation window, then simulate. timespan = [0, 5] x0 = {'x_0':1} t, x = simulate(model, timespan=timespan, x0=x0) # and plot the results in a matplotlib figure. from matplotlib.pyplot import plot fig = plot(t,x) plt.savefig("RC_2.svg", pad_inches=0, bbox_inches="tight") # Is this right? # Try to derive this by hand and then verify that your derivation has the same constitutive relation as the model. # # The constitutive relaiton $\Phi(x)=0$ can be produced via: # ## Part 3: Basic Control # # Now, let us add some additional components. # In particular, we want to add a modulated current source. 
Sf = new('Sf') add(model, Sf) connect(Sf, KCL) draw(model) plt.savefig("RC_3.svg", pad_inches=0, bbox_inches="tight") # Since we've not specified the value of the `Sf` component, it has been added to the control variables: # # `u_0` model.control_vars # Lets also check the new system relation: model.constitutive_relations # First; lets suppose a constant current: t, x = simulate(model, timespan=timespan, x0=x0, control_vars={'u_0':2}) plot(t,x) plt.savefig("RC_4.svg", pad_inches=0, bbox_inches="tight") # Next, let's suppose we drive it with a sinusional function. t, x = simulate(model, timespan=timespan, x0=x0, control_vars={'u_0':'sin(2*t)'}) plot(t,x) plt.savefig("RC_5.svg", pad_inches=0, bbox_inches="tight") # A step function? step_fn = 't < 1 ? 1 : 0' # if t < 0 then 1 else 0 t, x = simulate(model, timespan=timespan, x0=x0, control_vars={'u_0':step_fn}) plot(t,x) plt.savefig("RC_6.svg", pad_inches=0, bbox_inches="tight") # One can also use loops to do parameter sweeps # + import matplotlib.pyplot as plt fig = plt.figure() for i in range(4): func_text = "cos({i}t)".format(i=i) t_i, x_i = simulate(model, timespan=timespan, x0=x0, control_vars={'u_0':func_text}) plot(t_i,x_i) plt.savefig("RC_7.svg", pad_inches=0, bbox_inches="tight") # - # # ## Part 4: ...and when it breaks. # # ### What to do! # 1. Restart your kernal and try again. # 2. Come talk to me in my office or via email # 3. If it is a bug: Add an issue to the github issue list, or email me. Please inculde all your code. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns; sns.set() centrals = pd.read_csv("data/hlist_1.00035_centralhalos.csv") sats = pd.read_csv("data/hlist_1.00035_satellitehalos_1.0Rvir.csv") plt.figure(figsize=(8, 8)) plt.xlabel(r'$\rm Halo\ Mass\ (M_{vir})$') sns.distplot(np.log10(centrals['mvir(10)'])) plt.show() plt.figure(figsize=(8, 8)) plt.xlabel(r'$\rm Halo\ Mass\ (M_{vir})$') sns.distplot(np.log10(sats['mvir(10)'])) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.7 64-bit # name: python3 # --- import os import os.path as path import numpy as np import pandas as pd import matplotlib.pyplot as plt from tensorflow.keras import layers, models, optimizers, regularizers from tensorflow.keras.models import load_model current_dir = os.path.join(os.getcwd()) file = os.path.join(path.dirname(path.dirname(current_dir)), "generate_data\data_cwa.csv") myData = pd.read_csv(file, delimiter=',', usecols=['cwa','credit','time','difficulty', 'score']) my_data_copy = myData myData.shape myData["score"] = myData["score"].values / 100 myData["cwa"] = myData["cwa"].values / 100 myData["credit"] = myData["credit"].values / 10 myData ["difficulty"] = myData['difficulty'].values / 5 myData["time"] = myData["time"].values / 6 # + df = pd.DataFrame(myData) df = df.sample(frac=1) myData = df # + targets = myData[['time']].values myData.drop(('time'), axis=1, inplace=True) data = myData.values print(targets.shape) print(data.shape) # + # num_train = int(0.5 * len(data)) # num_val = int(0.25 * len(data)) # num_test = int(0.25 * len(data)) # + # train_data = data[0 : num_train] # test_data = data[num_train: 
num_train + num_test] # val_data = data[num_train + num_test:] # train_targets = targets[0 : num_train] # test_targets = targets[num_train: num_train + num_test] # val_targets = targets[num_train + num_test:] train_data = data[0 : 2000] test_data = data[2000: 3000] val_data = data[3000:4000] train_targets = targets[0 : 2000] test_targets = targets[2000: 3000] val_targets = targets[3000 :4000] print(len(train_data) + len(test_data) + len(val_data)) print(len(train_targets) + len(test_targets) + len(val_targets)) # + model = models.Sequential() model.add(layers.Dense(64, activation="relu", input_shape=(train_data.shape[1],))) # model.add(layers.Dropout(0.5)) model.add(layers.Dense(64, activation="relu")) # model.add(layers.Dropout(0.5)) model.add(layers.Dense(64, activation="relu")) # model.add(layers.Dropout(0.5)) model.add(layers.Dense(1)) model.summary() # - model.compile( optimizer=optimizers.RMSprop(learning_rate=2e-4), loss="mse", metrics=['mae'] ) history = model.fit(train_data, train_targets, epochs=50, batch_size=100, validation_data=(val_data, val_targets) ) acc = history.history['mae'] val_acc = history.history['val_mae'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'bo', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() test_mse_score, test_mae_score = model.evaluate(test_data, test_targets) model.save('time_prediction.h5') predicted = model.predict([[0.8771, 0.4, 0.6, 0.82]]) predicted # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # # # ___ # # NLP Basics Assessment # For this assessment we'll be using the short story [_An Occurrence at Owl Creek Bridge_](https://en.wikipedia.org/wiki/An_Occurrence_at_Owl_Creek_Bridge) by (1890).
The story is in the public domain; the text file was obtained from [Project Gutenberg](https://www.gutenberg.org/ebooks/375.txt.utf-8). # RUN THIS CELL to perform standard imports: import spacy nlp = spacy.load('en_core_web_sm') # **1. Create a Doc object from the file `owlcreek.txt`**
# > HINT: Use `with open('../TextFiles/owlcreek.txt') as f:` # Enter your code here: with open('../Text files/owlcreek.txt') as f: doc = nlp(f.read()) # + # Run this cell to verify it worked: doc[:36] # - # **2. How many tokens are contained in the file?** len(doc) # **3. How many sentences are contained in the file?**
HINT: You'll want to build a list first! # + #*for sentence in doc.sents: # print(sentence) #len(sentence) doc_sentences = [sents for sents in doc.sents] print(doc_sentences) len(doc_sentences) # - # **4. Print the second sentence in the document**
HINT: Indexing starts at zero, and the title counts as the first sentence. #doc[13:36] print(doc_sentences[2].text) # ** 5. For each token in the sentence above, print its `text`, `POS` tag, `dep` tag and `lemma`
# CHALLENGE: Have values line up in columns in the print output.** # + doc2 = doc[13:36] doc5 = print(doc_sentences[2].text) def show_lemmas(doc2): for token in doc2: print(f'{token.text:{12}} {token.pos_:{6}} {token.lemma:<{22}} {token.lemma_:}') print('\n\n\n') show_lemmas(doc2) for token in doc_sentences[2]: print(f'{token.text:{12}} {token.pos_:{6}} {token.dep_:<{20}} {token.dep:<{20}} {token.lemma:<{22}} {token.lemma_:}') # + # NORMAL SOLUTION: # + # CHALLENGE SOLUTION: # - # **6. Write a matcher called 'Swimming' that finds both occurrences of the phrase "swimming vigorously" in the text**
# HINT: You should include an `'IS_SPACE': True` pattern between the two words! # + # Import the Matcher library: from spacy.matcher import Matcher matcher = Matcher(nlp.vocab) # + # Create a pattern and add it to matcher: #SwimmingVigorously pattern1 = [{'LOWER': 'swimmingvigorously'}] #Swimming-Vigorously pattern2 = [{'LOWER':'swimming'},{'IS_PUNCT':True, 'OP':'*'},{'LOWER':'vigorously'}] #swimming vigorously pattern3 = [{'LOWER':'swimming'},{'IS_SPACE':True, 'OP':'*'},{'LOWER':'vigorously'}] #add matcher.add("Swimming Vigorously", None, pattern1, pattern2, pattern3) # + # Create a list of matches called "found_matches" and print the list: found_matches = matcher(doc) print(found_matches) # - # **7. Print the text surrounding each found match** # + ##for match_id, start, end in found_matches: ##string_id = nlp.vocab.strings[match_id] #adquirir a representação da string span = doc[1265:1290] #adquirir em qual linha iniciou e finalizou a palavra identificada aderindo CONTEXTO print(span.text) # - #Professor def surrounding(doc,start,end): print(doc[start-9:end+13]) surrounding(doc,1274,1277) string_id = nlp.vocab.strings[match_id] #adquirir a representação da string span = doc[3602:3617] #adquirir em qual linha iniciou e finalizou a palavra identificada aderindo CONTEXTO print(span.text) #Professor def surrounding2(doc,start,end): print(doc[start-7:end]) # + surrounding2(doc,3602,3617) # - # **EXTRA CREDIT:
Print the *sentence* that contains each found match** for sentence in doc_sentences: if found_matches[0][1] 4.4 else 4.4 polyY1 = polyX1 + 1 polyY2 = 3 / 2. * polyX1 + 3 y_lim = polyY2 if polyY2 > 10 else 10 x = [0, polyX1, polyX1, 0] y = [1, polyY1, polyY2, 3] scale = 1 poly = Polygon(np.c_[x, y] * scale, facecolor='#1B9AAA', edgecolor='#1B9AAA', alpha=0.5) ax.add_patch(poly) ax.plot(xd, yd, marker="o", c="#e86a92", label=legend, linestyle='') ax.plot(0, 4.187999875999753, label="logistic", marker='+', c='black') ax.plot(0, 1.7962675925351856, label="uniform", marker='^', c='black') ax.plot(4, 9, label="exponential", marker='s', c='black') ax.plot(0, 3, label="normal", marker='*', c='black') ax.plot(np.arange(0, polyX1, 0.1), 3 / 2. * np.arange(0, polyX1, 0.1) + 3, label="gamma", linestyle='-', c='black') ax.plot(np.arange(0, polyX1, 0.1), 2 * np.arange(0, polyX1, 0.1) + 3, label="lognormal", linestyle='-.', c='black') ax.legend() ax.set_ylim(y_lim, 0) ax.set_xlim(-0.2, polyX1) plt.xlabel("Skewness²") plt.title(title + ": Cullen and Frey map") plt.ylabel("Kurtosis") plt.savefig(title + legend + "cullenfrey.png") plt.show() def makespaces(s2, k, alpha, beta, legend, title): kk = pd.DataFrame({'Skew²': s2, 'Kurtosis': k, 'Alpha': alpha, 'Beta': beta}) K = 8 model = KMeans() visualizer = KElbowVisualizer(model, k=(1, K)) kIdx = visualizer.fit(kk.drop(columns="Beta")) visualizer.show() kIdx = kIdx.elbow_value_ model = KMeans(n_clusters=kIdx).fit(kk.drop(columns="Beta")) fig = plt.figure() ax = Axes3D(fig) cmap = plt.get_cmap('gnuplot') clr = [cmap(i) for i in np.linspace(0, 1, kIdx)] for i in range(0, kIdx): ind = (model.labels_ == i) ax.scatter(kk["Skew²"][ind], kk["Kurtosis"][ind], kk["Alpha"][ind], s=30, c=clr[i], label='Cluster %d' % i) ax.set_xlabel("Skew²") ax.set_ylabel("Kurtosis") ax.set_zlabel(r"$\alpha$") ax.legend() plt.title(title + ": EDF-K-means") plt.show() model = KMeans() visualizer = KElbowVisualizer(model, k=(1, K)) kIdx = visualizer.fit(kk.drop(columns="Alpha")) visualizer.show() kIdx = kIdx.elbow_value_ model = KMeans(n_clusters=kIdx).fit(kk.drop(columns="Alpha")) fig = plt.figure() ax = Axes3D(fig) cmap = plt.get_cmap('gnuplot') clr = [cmap(i) for i in np.linspace(0, 1, kIdx)] for i in range(0, kIdx): ind = (model.labels_ == i) ax.scatter(kk["Skew²"][ind], kk["Kurtosis"][ind], kk["Beta"][ind], s=30, c=clr[i], label='Cluster %d' % i) ax.set_xlabel("Skew²") ax.set_ylabel("Kurtosis") ax.set_zlabel(r"$\beta$") ax.legend() plt.title(title + ": EPSB-K-means") plt.show() def makespaces62(s2, k, alpha, beta, legend, title, ilist): kk = pd.DataFrame({'Skew²': s2, 'Kurtosis': k, 'Alpha': alpha, 'Beta': beta, "Entity": ilist}) K = 8 model = KMeans() visualizer = KElbowVisualizer(model, k=(1, K)) kIdx = visualizer.fit(kk.drop(columns=["Beta", "Entity"])) visualizer.show() kIdx = kIdx.elbow_value_ model = KMeans(n_clusters=kIdx).fit(kk.drop(columns=["Beta", "Entity"])) print(len(model.labels_)) fig = plt.figure(figsize=(20, 15)) ax = Axes3D(fig) cmap = plt.get_cmap('gnuplot') ilist2 = list(set(ilist)) clr = [cmap(i) for i in np.linspace(0, 1, len(ilist2))] for i in range(0, len(ilist2)): ind = (kk["Entity"] == ilist2[i]) ax.scatter(kk["Skew²"][ind], kk["Kurtosis"][ind], kk["Alpha"][ind], s=30, c=clr[i], label=ilist2[i]) ax.set_xlabel("Skew²") ax.set_ylabel("Kurtosis") ax.set_zlabel(r"$\alpha$") ax.legend() plt.title(title + ": EDF-K-means") plt.savefig("masoq.png") plt.show() kk = pd.DataFrame({'Skew²': s2, 'Kurtosis': k, 'Alpha': alpha, 'Beta': beta, "Entity": ilist}, 
index=model.labels_) kk.sort_index(inplace=True) kk.to_csv("clusteringalpha.csv") model = KMeans() visualizer = KElbowVisualizer(model, k=(1, K)) kIdx = visualizer.fit(kk.drop(columns=["Alpha", "Entity"])) visualizer.show() kIdx = kIdx.elbow_value_ model = KMeans(n_clusters=kIdx).fit(kk.drop(columns=["Alpha", "Entity"])) fig = plt.figure(figsize=(20, 15)) ax = Axes3D(fig) cmap = plt.get_cmap('gnuplot') clr = [cmap(i) for i in np.linspace(0, 1, len(ilist2))] for i in range(0, len(ilist2)): ind = (kk["Entity"] == ilist2[i]) ax.scatter(kk["Skew²"][ind], kk["Kurtosis"][ind], kk["Beta"][ind], s=30, c=clr[i], label=ilist[i]) ax.set_xlabel("Skew²") ax.set_ylabel("Kurtosis") ax.set_zlabel(r"$\beta$") ax.legend() plt.title(title + ": EPSB-K-means") plt.savefig("masoq2.png") plt.show() kk = pd.DataFrame({'Skew²': s2, 'Kurtosis': k, 'Alpha': alpha, 'Beta': beta, "Entity": ilist}, index=model.labels_) kk.sort_index(inplace=True) kk.to_csv("clusteringbeta.csv") def main(): title = "Série: Chaos Noise. {0}" d, ilist, rawdata = makeseries(Logistic, ["Logistic"], 30) aux1, aux2, aux3 = makeseries(HenonMap, ["Henon"], 30) rawdata += aux3 d += aux1 ilist += aux2 for i in range(len(rawdata)): plt.figure(figsize=(20, 12)) # Plot da série temporal ax1 = plt.subplot(211) ax1.set_title(title.format(rawdata[i][0]), fontsize=18) ax1.plot(rawdata[i][1], rawdata[i][2], color="firebrick", marker='o', linestyle='') # Plot e cálculo do DFA ax2 = plt.subplot(223) ax2.set_title(r"Detrended Fluctuation Analysis - DFA $\alpha$={0:.3}".format(rawdata[i][3], fontsize=15)) ax2.plot(rawdata[i][4], rawdata[i][5], marker='o', linestyle='', color="#12355B") ax2.plot(rawdata[i][4], rawdata[i][6], color="#9DACB2") # Plot e cálculo do PSD ax3 = plt.subplot(224) ax3.set_title(r"Power Spectrum Density - PSD $\beta$={0:.3}".format(rawdata[i][12]), fontsize=15) ax3.set_yscale('log') ax3.set_xscale('log') ax3.plot(rawdata[i][7], rawdata[i][8], '-', color='deepskyblue', alpha=0.7) ax3.plot(rawdata[i][9], rawdata[i][10], color="darkblue", alpha=0.8) ax3.axvline(rawdata[i][7][rawdata[i][14]], color="darkblue", linestyle='--') ax3.axvline(rawdata[i][7][rawdata[i][15]], color="darkblue", linestyle='--') ax3.plot(rawdata[i][9], rawdata[i][13](rawdata[i][9], rawdata[i][11], rawdata[i][12]), color="#D65108", linestyle='-', linewidth=3, label='$%.4f$' % (rawdata[i][12])) ax2.set_xlabel("log(s)") ax2.set_ylabel("log F(s)") ax3.set_xlabel("Frequência (Hz)") ax3.set_ylabel("Potência") plt.savefig("Chaosserietemporalpsddfa{}.png".format(i)) plt.show() n=8192 ypoints=[min(rawdata[0][2]) + (i/n) * (max(rawdata[0][2])-min(rawdata[0][2])) for i in range(0, n+1)] gev_fit = genextreme.fit(rawdata[0][2]) # GEV parameters from fit: c, loc, scale = gev_fit mean, var, skew, kurt = genextreme.stats(c, moments='mvsk') rv_gev = genextreme(c, loc=loc, scale=scale) gev_pdf = rv_gev.pdf(ypoints) #plt.title((title+"\nMu= {1:.3}, Sigma={2:.3}.").format(rawdata[i][0], mu, sigma)) n, bins, patches = plt.hist(rawdata[0][2], 60, density=1, facecolor='powderblue', alpha=0.75, label="Normalized data") plt.plot(np.arange(min(bins), max(bins), (max(bins) - min(bins))/len(rawdata[0][2])), gev_pdf[:len(rawdata[0][2])],'r-', lw=5, alpha=0.6, label='genextreme pdf') plt.ylabel("Probability Density") plt.xlabel("Value") plt.legend() #plt.savefig("PDF"+filename.format(i)) plt.show() ''' (mu,sigma)=norm.fit(rawdata[i][2]) plt.title((title+"\nMu= {1:.3}, Sigma={2:.3}.").format(rawdata[i][0], mu, sigma)) n, bins, patches = plt.hist(rawdata[i][2], 60, density=1, facecolor='powderblue', 
alpha=0.75) plt.plot(bins,norm.pdf(bins,mu,sigma), c="black", linestyle='--') plt.savefig("colorednoise{}PDF.png".format(i)) plt.show() plt.figure(figsize=(20, 12)) ''' title = "chaosnoise" d = makeK(d, ilist, title) main() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="zH9NLWD13EjB" # Install the dependencies # !apt-get install openjdk-8-jdk-headless -qq > /dev/null # !wget -q https://archive.apache.org/dist/spark/spark-3.0.1/spark-3.0.1-bin-hadoop3.2.tgz # !tar xf spark-3.0.1-bin-hadoop3.2.tgz # !pip install -q findspark # + id="c6Cokct83h5R" # Set the environment variables for running PySpark in the collaboration environmentimport os import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-3.0.1-bin-hadoop3.2" # + id="WaccJdNb4hol" # Run the local session to test the installation import findspark findspark.init('spark-3.0.1-bin-hadoop3.2') from pyspark.sql import SparkSession spark = SparkSession.builder.master('local[*]').getOrCreate() # + colab={"base_uri": "https://localhost:8080/", "height": 216} id="Uq4KARAb0_AR" outputId="34ca97ea-b4b9-4fdf-a860-6c6117361232" spark # + id="N6ymCYUi2kO_" df = (spark.read .format("csv") .option('header', 'true') .load("drive/MyDrive/db/iris_frame.csv")) # + colab={"base_uri": "https://localhost:8080/"} id="HD4TXz5f5XQG" outputId="ac8cb1bc-f59a-493b-e9af-c1e2438cd2ae" df.show(5) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Z6fRlGq65ky2" outputId="b0c59f19-aa21-4bd2-85c1-125a664f669c" df.toPandas() # + colab={"base_uri": "https://localhost:8080/"} id="PdyoYZH_504N" outputId="ab8cd114-343f-4bad-c1e7-4077ce1b5df4" # How many rows we have df.count() # + colab={"base_uri": "https://localhost:8080/"} id="RaiphMRh55sF" outputId="e4744f99-3b27-44e4-b790-d91d0a9f0bec" # The names of our columns df.columns # + colab={"base_uri": "https://localhost:8080/"} id="pRthBq4y6Dki" outputId="d0aa39b8-5573-42da-b3cc-8f0c9ab66793" # Types of our columns df.dtypes # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="TBUk7rJM6OAO" outputId="8ef04431-54f5-4f96-b25e-ca9fa123da15" # Basics stats from our columns df.describe().toPandas() # + colab={"base_uri": "https://localhost:8080/"} id="-O0ZCXh16S3W" outputId="3ec987f0-2ad7-47c1-d113-218e4d7527cf" from pyspark.sql.functions import col dataset = df.select(col('sepal length (cm)').cast('float'), col('sepal width (cm)').cast('float'), col('petal length (cm)').cast('float'), col('petal width (cm)').cast('float'), col('target').cast('integer') ) dataset.show() # + colab={"base_uri": "https://localhost:8080/"} id="zJc0dsuD7U6u" outputId="ed0fed49-a69c-4879-8b56-33581758854a" dataset.dtypes # + id="p3uVLZBa8PIl" # Assemble all the features with VectorAssembler required_features = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)' ] from pyspark.ml.feature import VectorAssembler assembler = VectorAssembler(inputCols=required_features, outputCol='features') transformed_data = assembler.transform(dataset) # + colab={"base_uri": "https://localhost:8080/"} id="Hes21iNV82XW" outputId="4ea76411-3494-47a8-e0f7-30d99a22748e" transformed_data.show(10) # + id="a49bETgf9Im1" # Modeling (training_data, test_data) = transformed_data.randomSplit([0.8,0.2]) # + id="FgIjr9dX9Yaz" from pyspark.ml.classification import RandomForestClassifier rf = 
RandomForestClassifier(labelCol='target', featuresCol='features', maxDepth=5) # + id="CKf-wAM89iib" model = rf.fit(training_data) # + id="qq43-X289nC8" predictions = model.transform(test_data) # + id="TAmO5iSJ9xJf" # Evaluate our model from pyspark.ml.evaluation import MulticlassClassificationEvaluator evaluator_accuracy = MulticlassClassificationEvaluator( labelCol='target', predictionCol='prediction', metricName='accuracy') # + colab={"base_uri": "https://localhost:8080/"} id="pghaVdy493et" outputId="55a633dc-3dad-4736-a83b-d2cdce4ccf8e" accuracy = evaluator_accuracy.evaluate(predictions) print('Test Accuracy = ', accuracy) # + id="adJn93diBoVr" evaluator_f1 = MulticlassClassificationEvaluator( labelCol='target', predictionCol='prediction', metricName='f1') # + colab={"base_uri": "https://localhost:8080/"} id="38028aHPCMKc" outputId="13a16cf6-9bde-4ad2-be2e-9a8760226b15" f1 = evaluator_f1.evaluate(predictions) print('Test f1 = ', f1) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import topojson as tp import geopandas import json # %matplotlib inline data = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres")) data.head() # - topo = tp.Topology(data) #topo.toposimplify(10, prevent_oversimplify=False, inplace=True)#.to_geojson() topo.to_widget() topo.toposimplify(20).to_gdf().plot() geopandas.GeoDataFrame().from_features(json.loads(topo.to_geojson())['features']).plot() # %matplotlib inline # + import geopandas from topojson.core.hashmap import Hashmap data = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres")) data = data[ (data.name == "Botswana") | (data.name == "South Africa") | (data.name == "Zimbabwe") | (data.name == "Zambia") ] # data = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres")) # data = data[ # (data.name == "Togo") | (data.name == "Benin") | (data.name == "Burkina Faso") # ] #topo = Dedup(data) topo = topojson.Topology(data, prequantize=False).toposimplify(2) topo.to_svg() # - topo # import json # geopandas.GeoDataFrame().from_features(json.loads(topo.to_geojson())['features']).plot() topo.to_gdf().iloc[0].geometry topo.to_gdf().iloc[0].geometry.wkt topo.to_gdf().plot() # + import pandas import geopandas import topojson as tp from shapely.wkt import loads df = pandas.DataFrame({ "name": ["P1", "P2", "P3"], "geometry": [ "POLYGON ((60.05 88.85, 60.3 86.9, 61.9 73.4, 51.85 72.1, 50.8 80.5, 57.95 81.4, 57.5 85.05, 59.05 85.25, 58.85 87.15, 60.05 88.85))", "POLYGON ((66.35 90.55, 65.75 86.8, 64.5 81.1, 63.7 77, 63 73.55, 61.9 73.4, 60.3 86.9, 64.15 87.45, 64.85 90.8, 66.35 90.55))", "POLYGON ((65.75 86.8, 70.5 87.45, 71.45 79, 69.85 78.85, 70.3 74.5, 64.15 73.75, 63.7 77, 64.5 81.1, 66.45 81.35, 65.75 86.8))" ] }) df['geometry'] = df['geometry'].apply(loads) gdf = geopandas.GeoDataFrame(df, geometry='geometry', crs='EPSG:27700') topo = tp.Topology(gdf, prequantize=False) # - topo.to_alt(color='properties.name:N') # + import pandas import geopandas import topojson as tp from shapely.wkt import loads df = pandas.DataFrame({ "name": ["P1", "P2", "P3"], "geometry": [ "POLYGON ((60.05 88.85, 60.3 86.9, 61.9 73.4, 51.85 72.1, 50.8 80.5, 57.95 81.4, 57.5 85.05, 59.05 85.25, 58.85 87.15, 60.05 88.85))", "POLYGON ((66.35 90.55, 65.75 86.8, 64.5 81.1, 63.7 77, 63 73.55, 61.9 73.4, 60.3 86.9, 64.15 87.45, 64.85 90.8, 66.35 90.55))", "POLYGON ((65.75 
86.8, 70.5 87.45, 71.45 79, 69.85 78.85, 70.3 74.5, 64.15 73.75, 63.7 77, 64.5 81.1, 66.45 81.35, 65.75 86.8))" ] }) df['geometry'] = df['geometry'].apply(loads) gdf = geopandas.GeoDataFrame(df, geometry='geometry', crs='EPSG:27700') topo = tp.Topology(gdf, shared_coords=True) #topo = tp.Topology(gdf, prequantize=False) # - topo.to_alt(color='properties.name:N') topo.to_svg(separate=True) c = Cut(data, options={'prequantize':False}) c.to_svg(include_junctions=True, separate=False) d = Dedup(data, options={'prequantize':False}) d.to_svg(include_junctions=True, separate=True) # + from IPython.display import SVG, display from shapely.ops import linemerge def svg_split_view(geom): svg_custom = geom._repr_svg_() for c in ['green','blue', 'orange', 'red']: svg_custom = svg_custom.replace('stroke="#66cc99"', f'stroke="{c}"', 1) display(SVG(svg_custom)) in_geoms = loads('MULTILINESTRING ((5 0, 4 -1, 4 0), (4 0, 4 1, 3 1, 3 0), (3 0, 2 1, 2 0), (1 0, 1 1))') svg_split_view(in_geoms) # 4 non shared arcs # + import topojson as tp import geopandas as gpd import json # Load JSON geometry json_string = '{"type": "FeatureCollection", "features": [{"id": "0", "type": "Feature", "properties": {"certainty": 4}, "geometry": {"type": "Polygon", "coordinates": [[[556395.0, -2289375.0], [556485.0, -2289375.0], [556485.0, -2289735.0], [556455.0, -2289735.0], [556455.0, -2289705.0], [556395.0, -2289705.0], [556395.0, -2289735.0], [556335.0, -2289735.0], [556335.0, -2289705.0], [556365.0, -2289705.0], [556365.0, -2289675.0], [556395.0, -2289675.0], [556395.0, -2289615.0], [556365.0, -2289615.0], [556365.0, -2289555.0], [556335.0, -2289555.0], [556335.0, -2289465.0], [556365.0, -2289465.0], [556365.0, -2289435.0], [556395.0, -2289435.0], [556395.0, -2289375.0]]]}}, {"id": "1", "type": "Feature", "properties": {"certainty": 4}, "geometry": {"type": "Polygon", "coordinates": [[[556065.0, -2289075.0], [556155.0, -2289075.0], [556155.0, -2289135.0], [556125.0, -2289135.0], [556125.0, -2289195.0], [556095.0, -2289195.0], [556095.0, -2289225.0], [556065.0, -2289225.0], [556065.0, -2289375.0], [556095.0, -2289375.0], [556095.0, -2289465.0], [556125.0, -2289465.0], [556125.0, -2289525.0], [556155.0, -2289525.0], [556155.0, -2289615.0], [556125.0, -2289615.0], [556125.0, -2289645.0], [556155.0, -2289645.0], [556155.0, -2289675.0], [556125.0, -2289675.0], [556125.0, -2289735.0], [556155.0, -2289735.0], [556155.0, -2289765.0], [556185.0, -2289765.0], [556185.0, -2289795.0], [556215.0, -2289795.0], [556215.0, -2289825.0], [556245.0, -2289825.0], [556245.0, -2289855.0], [556305.0, -2289855.0], [556305.0, -2289825.0], [556335.0, -2289825.0], [556335.0, -2289795.0], [556365.0, -2289795.0], [556365.0, -2289825.0], [556395.0, -2289825.0], [556395.0, -2289855.0], [556455.0, -2289855.0], [556455.0, -2289825.0], [556485.0, -2289825.0], [556485.0, -2289885.0], [556455.0, -2289885.0], [556455.0, -2289915.0], [556425.0, -2289915.0], [556395.0, -2289915.0], [556395.0, -2289945.0], [556365.0, -2289945.0], [556305.0, -2289945.0], [556305.0, -2289975.0], [556275.0, -2289975.0], [556245.0, -2289975.0], [556245.0, -2290005.0], [556215.0, -2290005.0], [556155.0, -2290005.0], [556155.0, -2290035.0], [556125.0, -2290035.0], [556095.0, -2290035.0], [556095.0, -2290065.0], [556065.0, -2290065.0], [556035.0, -2290065.0], [556035.0, -2290095.0], [556005.0, -2290095.0], [556000.0, -2290095.0], [556000.0, -2289135.0], [556005.0, -2289135.0], [556035.0, -2289135.0], [556035.0, -2289105.0], [556065.0, -2289105.0], [556065.0, -2289075.0]]]}}, {"id": "2", 
"type": "Feature", "properties": {"certainty": 0}, "geometry": {"type": "MultiPolygon", "coordinates": [[[[556000.0, -2290095.0], [556005.0, -2290095.0], [556035.0, -2290095.0], [556035.0, -2290065.0], [556065.0, -2290065.0], [556095.0, -2290065.0], [556095.0, -2290035.0], [556125.0, -2290035.0], [556155.0, -2290035.0], [556155.0, -2290005.0], [556215.0, -2290005.0], [556245.0, -2290005.0], [556245.0, -2289975.0], [556275.0, -2289975.0], [556305.0, -2289975.0], [556305.0, -2289945.0], [556365.0, -2289945.0], [556395.0, -2289945.0], [556395.0, -2289915.0], [556425.0, -2289915.0], [556455.0, -2289915.0], [556455.0, -2289885.0], [556485.0, -2289885.0], [556485.0, -2290250.0], [556000.0, -2290250.0], [556000.0, -2290095.0]]], [[[556485.0, -2289000.0], [556485.0, -2289375.0], [556455.0, -2289375.0], [556395.0, -2289375.0], [556395.0, -2289435.0], [556365.0, -2289435.0], [556365.0, -2289465.0], [556335.0, -2289465.0], [556335.0, -2289555.0], [556365.0, -2289555.0], [556365.0, -2289615.0], [556395.0, -2289615.0], [556395.0, -2289675.0], [556365.0, -2289675.0], [556365.0, -2289705.0], [556335.0, -2289705.0], [556335.0, -2289735.0], [556395.0, -2289735.0], [556395.0, -2289705.0], [556455.0, -2289705.0], [556455.0, -2289735.0], [556485.0, -2289735.0], [556485.0, -2289825.0], [556455.0, -2289825.0], [556455.0, -2289855.0], [556395.0, -2289855.0], [556395.0, -2289825.0], [556365.0, -2289825.0], [556365.0, -2289795.0], [556335.0, -2289795.0], [556335.0, -2289825.0], [556305.0, -2289825.0], [556305.0, -2289855.0], [556245.0, -2289855.0], [556245.0, -2289825.0], [556215.0, -2289825.0], [556215.0, -2289795.0], [556185.0, -2289795.0], [556185.0, -2289765.0], [556155.0, -2289765.0], [556155.0, -2289735.0], [556125.0, -2289735.0], [556125.0, -2289675.0], [556155.0, -2289675.0], [556155.0, -2289645.0], [556125.0, -2289645.0], [556125.0, -2289615.0], [556155.0, -2289615.0], [556155.0, -2289525.0], [556125.0, -2289525.0], [556125.0, -2289465.0], [556095.0, -2289465.0], [556095.0, -2289375.0], [556065.0, -2289375.0], [556065.0, -2289225.0], [556095.0, -2289225.0], [556095.0, -2289195.0], [556125.0, -2289195.0], [556125.0, -2289135.0], [556155.0, -2289135.0], [556155.0, -2289075.0], [556065.0, -2289075.0], [556065.0, -2289105.0], [556035.0, -2289105.0], [556035.0, -2289135.0], [556005.0, -2289135.0], [556000.0, -2289135.0], [556000.0, -2289000.0], [556485.0, -2289000.0]]]]}}]}' json_data = json.loads(json_string) # Convert to GeoDataFrame gdf = gpd.GeoDataFrame.from_features(json_data["features"]) # Construct topology and simplify (with shared_coords=False) topo = tp.Topology(gdf, shared_coords=False, prequantize=False) simplified_gdf = topo.toposimplify(30).to_gdf() # - simplified_gdf topo topo.toposimplify(30).to_alt(color='properties.certainty:Q') out_geoms = linemerge(in_geoms) svg_split_view(out_geoms) # 3 out of 4 can be merged # + import pandas import geopandas import topojson as tp from shapely.wkt import loads df = pandas.DataFrame({ "name": ["P1", "P2", "P3"], "geometry": [ "POLYGON ((60.05 88.85, 60.3 86.9, 61.9 73.4, 51.85 72.1, 50.8 80.5, 57.95 81.4, 57.5 85.05, 59.05 85.25, 58.85 87.15, 60.05 88.85))", "POLYGON ((66.35 90.55, 65.75 86.8, 64.5 81.1, 63.7 77, 63 73.55, 61.9 73.4, 60.3 86.9, 64.15 87.45, 64.85 90.8, 66.35 90.55))", "POLYGON ((65.75 86.8, 70.5 87.45, 71.45 79, 69.85 78.85, 70.3 74.5, 64.15 73.75, 63.7 77, 64.5 81.1, 66.45 81.35, 65.75 86.8))" ] }) df['geometry'] = df['geometry'].apply(loads) gdf = geopandas.GeoDataFrame(df, geometry='geometry', crs='EPSG:27700') topo = tp.Topology(gdf, 
prequantize=False) # - topo.to_gdf() topo.to_alt(color='properties.name:N') # + The issue is in the process when the remaining non-shared arcs are tried to be merged again. In this process are currently two options covered (1) the first and last non-shared arc can be merged and (2) all arcs can be merged. Until know this were the two situations It seems the assumption that this logic works for all cases has been proven wrong. Somehow need to find a better method to detect which segment-indices where merged # - The shared segments of a linestring are collected from all geometries. The remaining non-shared segments of each linestring are then tried to be merged. These merged contigious segment replaces the # + import pandas import geopandas import topojson as tp from shapely.wkt import loads df = pandas.DataFrame({ "name": ["P1", "P2", "P3"], "geometry": [ "POLYGON ((60.05 88.85, 60.3 86.9, 61.9 73.4, 51.85 72.1, 50.8 80.5, 57.95 81.4, 57.5 85.05, 59.05 85.25, 58.85 87.15, 60.05 88.85))", "POLYGON ((66.35 90.55, 65.75 86.8, 64.5 81.1, 63.7 77, 63 73.55, 61.9 73.4, 60.3 86.9, 64.15 87.45, 64.85 90.8, 66.35 90.55))", "POLYGON ((65.75 86.8, 70.5 87.45, 71.45 79, 69.85 78.85, 70.3 74.5, 64.15 73.75, 63.7 77, 64.5 81.1, 66.45 81.35, 65.75 86.8))" ] }) df['geometry'] = df['geometry'].apply(loads) gdf = geopandas.GeoDataFrame(df, geometry='geometry', crs='EPSG:27700') topo = Dedup(gdf, options={'prequantize':False}) # - topo # + import topojson as tp import geopandas as gpd # Read file gdf = gpd.read_file(r"C:\Users\lilei\Downloads\polygons\polygons.json") # Construct topology and simplify topo = tp.Topology(gdf, prequantize=False) simplified_gdf = topo.toposimplify(30).to_gdf() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="3yzVZcFVW4J3" colab_type="text" # # `nnetsauce`'s Ridge2 Multitask classifier # # This notebook demonstrates the use of `nnetsauce`'s Ridge2 Multitask classifier. It's a quasi-randomized network model for classification with 2 shrinkage parameters (one on input data, another one on the hidden layer). In this model, a multi-class classification problem is turned into multiple two-class problems. # + id="I2BYGMHm_YT4" colab_type="code" colab={} pip install git+https://github.com/thierrymoudiki/nnetsauce.git # + id="VSpcAAgc_Hyp" colab_type="code" colab={} import nnetsauce as ns import numpy as np from sklearn.datasets import load_breast_cancer, load_wine, load_iris, load_digits, make_classification from sklearn.model_selection import train_test_split from sklearn import metrics from time import time # + id="7cB4SMlzq-z2" colab_type="code" outputId="a69936aa-b29c-4c52-88b8-c34b13b1646f" colab={"base_uri": "https://localhost:8080/", "height": 260} # dataset no. 
1 ---------- breast_cancer = load_breast_cancer() Z = breast_cancer.data t = breast_cancer.target np.random.seed(123) X_train, X_test, y_train, y_test = train_test_split(Z, t, test_size=0.2) print(Z.shape) fit_obj = ns.Ridge2MultitaskClassifier(n_hidden_features=np.int(9.83730469e+01), dropout=4.31054687e-01, n_clusters=np.int(1.71484375e+00), lambda1=1.24023438e+01, lambda2=7.30263672e+03) start = time() fit_obj.fit(X_train, y_train) print(time() - start) print(fit_obj.score(X_test, y_test)) print(fit_obj.score(X_test, y_test, scoring="roc_auc")) start = time() preds = fit_obj.predict(X_test) print(time() - start) print(metrics.classification_report(preds, y_test)) # + id="JnfY88AkrBNi" colab_type="code" outputId="72aac767-04be-4e69-ff9a-4872ba11c8bd" colab={"base_uri": "https://localhost:8080/", "height": 225} # dataset no. 2 ---------- wine = load_wine() Z = wine.data t = wine.target np.random.seed(123) Z_train, Z_test, y_train, y_test = train_test_split(Z, t, test_size=0.2) fit_obj = ns.Ridge2MultitaskClassifier(n_hidden_features=15, dropout=0.1, n_clusters=3, type_clust="gmm") start = time() fit_obj.fit(Z_train, y_train) print(time() - start) print(fit_obj.score(Z_test, y_test)) preds = fit_obj.predict(Z_test) print(metrics.classification_report(preds, y_test)) # + id="wnfLXUAerDvs" colab_type="code" outputId="e63e159b-e028-4079-8ab0-5734b6429a0f" colab={"base_uri": "https://localhost:8080/", "height": 225} # dataset no. 3 ---------- iris = load_iris() Z = iris.data t = iris.target np.random.seed(123) Z_train, Z_test, y_train, y_test = train_test_split(Z, t, test_size=0.2) fit_obj = ns.Ridge2MultitaskClassifier(n_hidden_features=10, dropout=0.1, n_clusters=2) start = time() fit_obj.fit(Z_train, y_train) print(time() - start) print(fit_obj.score(Z_test, y_test)) preds = fit_obj.predict(Z_test) print(metrics.classification_report(preds, y_test)) # + id="opUwVh8FOoAv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="7766faa7-cf92-436a-bf9a-341d268cfdc9" # dataset no. 
4 ---------- digits = load_digits() X = digits.data y = digits.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123) fit_obj = ns.Ridge2MultitaskClassifier(n_hidden_features=25, dropout=0.1, n_clusters=3, type_clust="gmm") start = time() fit_obj.fit(X_train, y_train) print(time() - start) print(fit_obj.score(X_test, y_test)) start = time() preds = fit_obj.predict(X_test) print(time() - start) print(metrics.classification_report(preds, y_test)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### use tf.keras subclass model to build your own custom model # + import argparse import os import numpy as np import sys from config import get_cfg_defaults parser = argparse.ArgumentParser() parser.add_argument( "--config-file", default=None, metavar="FILE", help="path to config file", type=str, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args([]) # + cfg = get_cfg_defaults() if args.config_file is not None: cfg.merge_from_file(args.config_file) if args.opts is not None: cfg.merge_from_list(args.opts) cfg.freeze() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.SYSTEM.DEVICE) # + # model.py import tensorflow as tf import tensorflow.keras.layers as layers import tensorflow.keras.models as models import tensorflow.nn as F class Conv_bn_relu(models.Model): """Stack blocks of Conv2D->BN->relu. Args: filters (int): numbers of filters of conv layer kernel_size (int): filter size strides (int): stride step data_format (str): channels_first or channels_last use_bias (bool): add bias to layer? Returns: tf.keras.model object """ def __init__(self, filters, kernel_size=3, strides=1, data_format="channels_last", use_bias=True, **kwargs): super(Conv_bn_relu, self).__init__(**kwargs) axis = -1 if data_format is "channels_last" else 1 self.conv = Conv2DFixedPadding(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=use_bias) self.normalize = layers.BatchNormalization(axis=axis) def call(self, x, training=True): x = self.conv(x) x = self.normalize(x, training=training) return F.relu(x) class StackCNN(models.Model): """Stack all required layers together. Args: neurons_of_layers (list): list of filter size of convolution layers output_units (int): units of output node Returns: tf.keras.model object """ def __init__(self, neurons_of_layers, output_units, **kwargs): super(StackCNN, self).__init__(**kwargs) self.layers_list = [] for i, l in enumerate(neurons_of_layers): if (i+1) != len(neurons_of_layers): self.layers_list.append(Conv_bn_relu(filters=l, kernel_size=3, strides=1)) self.layers_list.append(layers.MaxPooling2D(pool_size=(2,2))) else: self.layers_list.append(Conv_bn_relu(filters=l, kernel_size=3, strides=1)) self.layers_list.append(layers.Flatten()) self.layers_list.append(layers.Dense(units=output_units)) def call(self, x, training=True): for l in self.layers_list: try: # some customized layer should give training flags x = l(x, training=training) except: # some original layers may not have training flags x = l(x) return F.softmax(x) ## Fixed Functions ## def fixed_padding(inputs, kernel_size, data_format): """Pads the input along the spatial dimensions independently of input size. 
This function is copied from: https://github.com/tensorflow/models/blob/master/official/resnet/resnet_model.py Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = tf.pad(tensor=inputs, paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = tf.pad(tensor=inputs, paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs class Conv2DFixedPadding(models.Model): """Class for Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). This class is based on: https://github.com/tensorflow/models/blob/master/official/resnet/resnet_model.py """ def __init__(self, filters, kernel_size=3, strides=1, data_format="channels_last", use_bias=True, **kwargs): super(Conv2DFixedPadding, self).__init__(**kwargs) self.kernel_size = kernel_size self.data_format = data_format self.strides = strides self.conv = layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=('SAME' if strides == 1 else 'VALID'), use_bias=use_bias, data_format=data_format) def call(self, x): if self.strides > 1: x = fixed_padding(x, self.kernel_size, self.data_format) return self.conv(x) # - from tensorflow.keras.datasets.cifar10 import load_data # + train, valid = load_data() x_train, y_train = train x_valid, y_valid = valid x_train = x_train / 255. x_valid = x_valid / 255. 
y_train = tf.keras.utils.to_categorical(y_train, 10) y_valid = tf.keras.utils.to_categorical(y_valid, 10) # - input_layer = layers.Input(shape=[32,32,3]) module = StackCNN(neurons_of_layers=[32,32,64], output_units=10)(input_layer) model = models.Model(inputs=input_layer, outputs=module) model.compile(loss="categorical_crossentropy", optimizer=tf.train.AdamOptimizer(), metrics=["acc"]) model.summary() model.get_layer('stack_cnn').summary() model.fit(x_train, y_train, epochs=cfg.TRAIN.EPOCHS, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True, validation_data=(x_valid, y_valid)) # + def build_example_model(): input_layer = layers.Input(shape=(32,32,3)) x = layers.Conv2D(filters=32, kernel_size=3, strides=1)(input_layer) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.MaxPooling2D(pool_size=(2,2))(x) x = layers.Conv2D(filters=32, kernel_size=3, strides=1)(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.MaxPooling2D(pool_size=(2,2))(x) x = layers.Conv2D(filters=64, kernel_size=3, strides=1)(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.GlobalAveragePooling2D()(x) x = layers.Dense(units=10, activation="softmax")(x) return models.Model(inputs=input_layer, outputs=x) model = build_example_model() model.compile(loss="categorical_crossentropy", optimizer=tf.train.AdamOptimizer(), metrics=["acc"]) model.summary() # - model.fit(x_train, y_train, epochs=cfg.TRAIN.EPOCHS, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True, validation_data=(x_valid, y_valid)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # %load_ext autoreload # # %autoreload 2Setup # To run this notebook, you'll need (do these in order): # # The suitesparse library: # # From conda: # # conda install suitesparse # # On ubuntu/similar linux # # apt-get install suitesparse # # My version of the PySPQR repository (This is where you need suitesparse) # # https://www.github.com/smithb/PySPQR.git # # My LSsurf repository # # https://www.github.com/smithb/LSsurf.git # # My pointCollection repository: # # https://www.github.com/smithb/pointCollection.git # # For each repository, you'll need to clone the repo (git clone [url to .git file]), then cd to the # # directory that git makes, and type: # # python3 setup.py install --user # # Good luck! # %load_ext autoreload # %autoreload 2 import matplotlib.pyplot as plt import numpy as np from LSsurf.smooth_xytb_fit import smooth_xytb_fit import pointCollection as pc # # Introduction # The ATL14/15 algorithm works by fitting a time-varying surface to the data. The form of the model is: # $$ # z_m(x, y, t) = z_0(x,y) + \delta z(x, y, t) # $$ # Here $z_0$ is a DEM giving the surface height at time $t_0$, and $dz(x,y,t)$ gives the surface-height change between $t_0$ and $t$ at location $x,y$. The DEM is represented as a high-resolution grid of elevations, while $dz$ is represented as a set of lower-resolution surfaces, one for each quarter-year interval. The model is constructed so that the $z_0$ surface for time $t_0$ is uniformally equal to zero. 
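#
# As a quick, self-contained illustration of this decomposition (toy numbers only, not LSsurf internals), the model height at an arbitrary time can be thought of as the static DEM value plus a $\delta z$ value interpolated between the quarterly surfaces:
# +
import numpy as np

# toy quarter-year epochs and one grid point's model components
dz_epochs = np.arange(-1.0, 1.25, 0.25)      # quarterly epochs from t=-1 to t=1
z0_point = 120.0                             # DEM height at this (x, y), defined at the reference time t0=0
dz_point = 0.5 * dz_epochs                   # toy height change relative to t0 at each epoch

def model_height(t, z0, dz, epochs):
    """Compose z_m(t) = z0 + dz(t), interpolating dz linearly between epochs."""
    return z0 + np.interp(t, epochs, dz)

print(model_height(0.0, z0_point, dz_point, dz_epochs))   # equals the DEM value at the reference time
print(model_height(0.6, z0_point, dz_point, dz_epochs))
# -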
# # We find the surface by minimizing the quantity: # $$ # R = W_{xx0}^2 \int (\nabla^2 z_0)^2 dA + W_{x0} \int (\nabla z_0)^2 dA + W_{xxt}\int (\nabla^2 \frac{\partial\delta z}{\partial t})^2 dAdt + W_{xt}\int (\nabla \frac{\partial\delta z}{\partial t})^2 dAdt + W_{tt}\int (\frac{\partial^2 \delta z}{ \partial t^2})^2 dA + \sum (\frac{z_m(x,y,t)-z_i(x,y,t)}{\sigma_i})^2 # $$ # Here $W_{xx0}$ is the inverse of the expected RMS of the second spatial derivatives of the surface height, $W_{x0}$ is the the inverse of the expected RMS of the first derivatives of the surface height, $W_{xxt}$ is the the inverse of the expected RMS of the second spatial derivatives of the $dz/dt$ field, etc. The last term is the sum of the error-scaled residuals between the data and the model. I've put some mathematical description of how this model behaves in the attenuation_curves.ipynb notebook in this repo's directory. # # To construct a surface, we need to specify the data values, the model grid resolutions for the DEM and for the height-change surfaces, the dimensions of the grid, and the expected derivative values. # # ## Solutions in one dimension (x) # Initially, we will demonstrate the fit on a long, skinny domain, to illustrate how the model works in one dimension. We will specify identical values for the data for two different time epochs, so that there is no time variation in the solution, and all variation in the solution is in the DEM ($z_0$) field. # define the domain's width in x, y, and time W={'x':1.e4,'y':200,'t':2} # define the grid center: ctr={'x':0., 'y':0., 't':0.} # define the grid spacing spacing={'z0':50, 'dz':50, 'dt':0.25} # define the data as a sine wave with a wavelength of 2 km and an amplitude of 100m. x=np.arange(-W['x']/2, W['x']/2, 100) lambda_x=2000 amp=100 data_sigma=1 D=pc.data().from_dict({'x':x, 'y':np.zeros_like(x),'z':-amp*np.cos(2*np.pi*x/lambda_x),\ 'time':np.zeros_like(x)-0.5, 'sigma':np.zeros_like(x)+data_sigma}) # To ensure a time-constant simulation, replicate the data at times -0.5 and 0.5: data=pc.data().from_list([D, D.copy().assign({'time':np.zeros_like(x)+0.5})]) # + # define the expected statistics of the surface E_d3zdx2dt=0.0001 E_d2z0dx2=0.06 E_d2zdt2=5000 data_gap_scale=2500 E_RMS={'d2z0_dx2':E_d2z0dx2, 'dz0_dx':E_d2z0dx2*data_gap_scale, 'd3z_dx2dt':E_d3zdx2dt, 'd2z_dxdt':E_d3zdx2dt*data_gap_scale, 'd2z_dt2':E_d2zdt2} # run the fit S=smooth_xytb_fit(data=data, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=2, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=False, dzdt_lags=[1]) # plot the results plt.figure() plt.clf() plt.plot(data.x, data.z,'ko', label='data') plt.plot(S['m']['z0'].x, S['m']['z0'].z0[0,:],'r', label='model') plt.legend(); # - # Reducing the expected derivatives results in a smoother surface that does not fit the data as well: # + plt.figure() plt.clf() plt.plot(data.x, data.z,'ko', label='data') A_z0={} A_expected={} # data density rd = data.size/W['x']/W['y'] for E_d2z in [0.006, 0.001, 0.0003]: E_RMS['d2z0_dx2'] = E_d2z # run the fit S=smooth_xytb_fit(data=data, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=2, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=False, dzdt_lags=[1]) plt.plot(S['m']['z0'].x, S['m']['z0'].z0[0,:], label=f'E_d2z0_dx2={E_d2z}') # calculate the amplitude A_z0[E_d2z]=np.max(np.abs(S['m']['z0'].z0[0,np.abs(S['m']['z0'].x)<3000])) # Calculate the expected amplitude A_expected[E_d2z] = amp /( 1 + 16*E_d2z**-2*np.pi**4/(lambda_x**4*rd)*data_sigma**2) plt.xlabel('x, 
m') plt.ylabel('z0, m') plt.legend(loc='lower right') E_RMS['d2z0_dx2']= 0.006 # - # The expected amplitude for a model fit to data representing a sine wave of amplitude $A_d$ with wavelength $\lambda$ and estimated error $\sigma$ is: # # $$A_m = \frac{A_{d}}{1 + \frac{16 \pi^{4} \sigma^{2} }{E_{xx}^2\lambda^{4} \rho}}$$ # # Here $E_{xx}$ is the expected second spatial derivative of $z0$. A plot of the recovered model amplitude vs. $E_{xx}$ is consistent with this model: # + Ezz_vals = np.arange(0.0002, 0.007, 0.0001) A_expected = amp /( 1 + 16*np.pi**4*data_sigma**2/(Ezz_vals**2*lambda_x**4*rd)) plt.figure(); plt.clf() plt.plot(Ezz_vals, A_expected, label='expected') temp=np.array(list(A_z0.keys())) plt.plot(temp, np.array([A_z0[key] for key in A_z0.keys()]),'o', label='recovered') plt.xlabel('$E_{xx}$') plt.ylabel('amplitude') plt.legend() # - # The values recovered from the fit are within a few percent of those expected based on the analytic expression. # ## Fitting data with gaps # If there is a gap in the data, the solution will tend to form a smooth arc over the gap. The smaller the expected first derivative of the data, the more the solution will flaten out across the gap. For solutions of this type, the solution will tend to go flat over a distance $data\_gap\_scale$ if $E[RMS(dz/dx)] = E[RMS(d^2z/dx^2)]*data\_gap\_scale$ # # As you might imagine, we try to set _data\_gap\_scale_ to be about half as large as we expect gaps in the data to be, so that large extrapolations don't produce odd values in the solution. We can demonstrate how the solution behaves in over data gaps by deleting the central 3 km of the data from our previous example, and fitting the model to the remaining data with different values of $data\_gap\_scale$. # + #make a set of data with a gap data_with_gap=data[np.abs(data.x)>1500] plt.figure() plt.clf() plt.plot(data_with_gap.x, data_with_gap.z,'ko', label='data') # run the solution with different data gap scales for this_data_gap_scale in [4000, 2000, 1000, 500, 250]: E_RMS['dz0_dx'] = E_RMS['d2z0_dx2']*this_data_gap_scale S=smooth_xytb_fit(data=data_with_gap, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=2, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=False, dzdt_lags=[1]) plt.plot(S['m']['z0'].x, S['m']['z0'].z0[0,:], label=f'data gap scale={this_data_gap_scale}') plt.legend(loc='lower right'); plt.xlabel('x'); plt.ylabel('h') # - # The smaller data_gap_scale values result in a flatter solution inside the gap, at the expense of larger misfits. Excessively small data_gap_scale values are also undesirable because they can introduce artificial flattening in smooth ice-sheet regions with consistent surface slopes. We set data_gap_scale to 1500 m (equal to half the ICESat-2 pair-to-pair spacing), which seems to produce clean results. # ## Solutions in one dimension and time (x, t) # Now let's see what happens when the solution can vary in space and time. We'll specify a flat surface for t=-0.99 (just after the start of the solution) and a sinusoidal surface for t=0.99 (just before the end). We will specify that the DEM is for reference epoch 4 (t=0). 
# + D0=pc.data().from_dict({'x':x, 'y':np.zeros_like(x),'z':np.zeros_like(x),\ 'time':np.zeros_like(x)-0.99, 'sigma':np.zeros_like(x)+1}) D1=pc.data().from_dict({'x':x, 'y':np.zeros_like(x),'z':amp-amp*np.cos(2*np.pi*x/lambda_x),\ 'time':np.zeros_like(x)+0.99, 'sigma':np.zeros_like(x)+1}) data_dt=pc.data().from_list([D0, D1]) data_gap_scale=2500 E_RMS['d3z_dx2dt'] = 0.006 E_RMS['d2z_dxdt'] = 0.006*data_gap_scale E_RMS['d2z0_dx2'] = 0.03 E_RMS['dz0_dx'] = 0.03*data_gap_scale S=smooth_xytb_fit(data=data_dt, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=False, dzdt_lags=[1]) plt.figure();plt.clf() plt.plot(D0.x, D0.z,'x', color='gray', label='data, t=-0.99') plt.plot(D1.x, D1.z,'ko', label='data, t=0.99') for epoch in range(S['m']['dz'].shape[2]): this_time=S['m']['dz'].time[epoch] plt.plot(S['m']['dz'].x, S['m']['z0'].z0[2,:]+S['m']['dz'].dz[2,:, epoch], label=f'$\delta z$, t={this_time}') plt.plot(S['m']['z0'].x, S['m']['z0'].z0[2,:],'k', label='z0', linewidth=2) plt.xlabel('x') plt.ylabel('z') plt.legend(); plt.figure(); plt.clf() for epoch in range(S['m']['dz'].shape[2]): this_time=S['m']['dz'].time[epoch] plt.plot(S['m']['dz'].x, S['m']['dz'].dz[2,:, epoch], label=f'$\delta z$, t={this_time}') plt.xlabel('x') plt.ylabel('$\delta$ z') plt.legend(); # - # The recovered surface matches the data at t=-1 and t=1, and varies smoothly in between. Because the reference epoch is halfway between the two data sets, its value is halfway between flat surface (at $t \approx -1$) and the raised sinusoid (at $t \approx 1$). The $\delta z$ fields smoothly so that at any point, the surface varies approximately linearly from the $t=-1$ to its $t=1$ solution: plt.figure(6); # find a point close to x=1000 ii=np.argmin(np.abs(S['m']['dz'].x-1000)) # plot the time series plt.plot(S['m']['dz'].time, S['m']['dz'].dz[2, ii, :]) # If we add another time point at t=-0.25 that is not colinear (in time) with the other time points, we get a smooth time variation at each point: # + D2 = pc.data().from_dict({'x':x, 'y':np.zeros_like(x),'z':-1*amp+np.zeros_like(x),\ 'time':np.zeros_like(x)-0.25, 'sigma':np.zeros_like(x)+1}) data_dt2=pc.data().from_list([D0, D1, D2]) S=smooth_xytb_fit(data=data_dt2, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=False, dzdt_lags=[1]) plt.figure(7); # Find the data points closest x=1000 di=np.where(np.abs(data_dt2.x-1000)<2) plt.plot(data_dt2.time[di], data_dt2.z[di],'o', label='data for x=1000') # find a model point close to x=1000 ii=np.argmin(np.abs(S['m']['dz'].x-1000)) # plot the recovered time series plt.plot(S['m']['dz'].time, S['m']['dz'].dz[2, ii, :], label='dz for x=1000') plt.plot(S['m']['dz'].time, S['m']['dz'].dz[2, ii, :] + S['m']['z0'].z0[2,ii], label='dz+z0 for x=1000') plt.legend(); plt.xlabel('time') plt.ylabel('h') # - # # Editing outliers # Outliers are identified based on the distribution of scaled residuals in the data. We iterate the solution and at each iteration calculate a robust estimate of the standard deviation of residuals in the data ($\hat{\sigma}$), then remove the outliers that are larger than $3\hat{\sigma}$. Let's return to the example with two data epochs, add some noise to all the data, and large noise values to a subset of the data. 
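#
# Before building the noisy data set, the short self-contained sketch below illustrates the editing rule itself on synthetic residuals: estimate a robust spread $\hat{\sigma}$ (here from the central percentiles of the residual distribution, which is one possible robust estimator and not necessarily the one used inside _smooth\_xytb\_fit_), then flag residuals larger than $3\hat{\sigma}$.
# +
import numpy as np

resid = np.random.randn(1000)                   # well-behaved residuals, sigma ~ 1
bad = np.random.rand(1000) > 0.9                # roughly 10% of points get large errors added
resid[bad] += (np.random.rand(bad.sum()) - 0.5) * 100

# robust spread from the 16th-84th percentile half-width (about 1 sigma for the Gaussian core);
# this percentile choice is illustrative, not necessarily the estimator used by smooth_xytb_fit
p16, p84 = np.percentile(resid, [16, 84])
sigma_hat = 0.5 * (p84 - p16)

keep = np.abs(resid - np.median(resid)) < 3 * sigma_hat   # the "three sigma edit"
print('sigma_hat = %.2f, kept %d of %d points' % (sigma_hat, keep.sum(), keep.size))
# -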
# + data_dte=pc.data().from_list([D0, D1]) data_dte.z += np.random.randn(data_dte.size) # introduce about 10% large outliers outliers = np.argwhere(np.random.rand(data_dte.size) > 0.90).ravel() data_dte.z[outliers] += (np.random.rand(outliers.size)-0.5)*200 plt.figure(8) plt.plot(data_dte.x, data_dte.z,'.', label='data') plt.plot(data_dte.x[outliers], data_dte.z[outliers],'r*', label='outliers') # - # We can see what these outliers do to the solution without editing by running the solution with only one iteration. Note that we have asked for verbose output from _smooth\_xytb\_fit_, which reports on the robust spread and the outliers from each iteration. # + S=smooth_xytb_fit(data=data_dte, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=1, VERBOSE=True, dzdt_lags=[1]) plt.figure(9, figsize=[9, 4]); plt.clf() plt.subplot(121) plt.plot(data_dte.x, data_dte.z,'k.', label='data') plt.plot(S['m']['dz'].x, S['m']['z0'].z0[2,:]+S['m']['dz'].dz[2, :, 0],'k', label='solution, t=-1') plt.plot(S['m']['dz'].x, S['m']['z0'].z0[2,:]+S['m']['dz'].dz[2, :, -1],'b', label='solution, t=1') plt.xlabel('x') plt.ylabel('h') plt.subplot(122) r=S['data'].z-S['data'].z_est plt.hist(r, np.arange(-101, 100, 2)); plt.xlabel('residual') plt.ylabel('count') # - # Running the solution for more iterations eliminates a greater share of the outliers. The acceptance (or non-acceptance) of the data points is stored in the _three\_sigma\_edit_ field of S['data']. # + S=smooth_xytb_fit(data=data_dte, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=10, VERBOSE=True, dzdt_lags=[1]) plt.figure(9, figsize=[9, 4]); plt.clf() plt.subplot(121) d_out=S['data'] plt.plot(d_out.x[d_out.three_sigma_edit==1], d_out.z[d_out.three_sigma_edit==1],'k.') plt.plot(d_out.x[d_out.three_sigma_edit==0], d_out.z[d_out.three_sigma_edit==0],'r.') plt.plot(S['m']['dz'].x, S['m']['z0'].z0[2,:]+S['m']['dz'].dz[2, :, 0],'k') plt.plot(S['m']['dz'].x, S['m']['z0'].z0[2,:]+S['m']['dz'].dz[2, :, -1],'b') plt.subplot(122) r=d_out.z-d_out.z_est plt.hist(r, np.arange(-101, 100, 2), color='red') plt.hist(r[d_out.three_sigma_edit==1], np.arange(-101, 100, 2), color='blue'); # - # This only works well if the constraints allow a fairly good fit between the data and the model. If the best misfit allowed by the constraint is large, then the histogram of residuals is broad, and the distinction (in residual space) between outliers and datapoints is not as large. # # Data biases # # Let's make a new example, with several cycles of data on the same line. Each will have a value that is displaced from a trend line to simulate correlated errors (biases) in the data. # define simulation parameters sigma_uncorr=0.1 bias_mag=0.25 #t_vals=np.arange(-0.99, 0.99+0.1, 0.1) t_vals=np.linspace(-.99, 0.99, 9) z_vals=0.5*t_vals # define the bias values bias_vals=np.random.randn(len(z_vals))*bias_mag # make dataset x1=np.arange(-W['x']/2, W['x']/2, 5) y1=np.zeros_like(x1) D_list=[] for cycle in range(len(t_vals)): this_z = np.zeros_like(x1)+z_vals[cycle]+bias_vals[cycle] + np.random.rand(x1.size)*sigma_uncorr D_list.append( pc.data().from_dict({'x':x1,'y':y1,'time':np.zeros_like(x1)+t_vals[cycle], 'cycle':np.zeros_like(x1)+cycle,\ 'z':this_z, 'sigma':np.zeros_like(x1)+sigma_uncorr,'sigma_corr':np.zeros_like(x1)+bias_mag}) ) data_biased=pc.data().from_list(D_list) # Note that we have specified a _sigma\_corr_ value that describes the correlated error magnitude. 
It doesn't get used until we tell the inversion to solve for errors. # + # fit the biased data Sb=smooth_xytb_fit(data=data_biased, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=10, VERBOSE=False, dzdt_lags=[1]) # plot the results: # find a row and a column in the center of the simulation r0_dz, c0_dz, e0_dz = np.round(np.array(Sb['m']['dz'].shape)/2).astype(int) r0_z0, c0_z0 = np.round(np.array(Sb['m']['z0'].shape)/2).astype(int) # plot the model for the center point plt.figure(10); plt.plot(Sb['m']['dz'].time, Sb['m']['dz'].dz[r0_dz, c0_dz, :]+Sb['m']['z0'].z0[r0_z0, c0_z0], label='biased model') plt.plot(t_vals, bias_vals+z_vals, '*', label='data') plt.legend() # - # The result is that the recovered delta-z signal matches the biased data well. If we fit the data without taking into account the biases, the large number of data points with the same bias exert a strong influence on the model, so the smoothness constraints don't help much to suppress signals related to the biases. # # ## Estimating data biases # # We can tell the inversion that there is a bias parameter using the _blas\_params_ keyword, which takes a list of all parameters over which the bias is correlated: the solution will estimate one bias for each combination of unique values of the parameter in the list, and will set the expected RMS value for each parameter to the median of the correlated error ( _sigma\_corr_ ) values for the data. If there are four cycles in the data, there will be four bias estimates. In solving the ATL14/15 problem, we set _bias\_params_ to ['rgt','cycle'], so there is one bias estimated for each rgt and each cycle. Sbc=smooth_xytb_fit(data=data_biased, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=False, max_iterations=10, VERBOSE=False, dzdt_lags=[1], bias_params=['cycle']) plt.figure(11); plt.plot(Sb['m']['dz'].time, Sb['m']['dz'].dz[r0_dz, c0_dz, :]+Sb['m']['z0'].z0[r0_z0, c0_z0], label='no bias estimate') plt.plot(Sbc['m']['dz'].time, Sbc['m']['dz'].dz[r0_dz, c0_dz, :]+Sbc['m']['z0'].z0[r0_z0, c0_z0], label='cycle bias estimated') plt.plot(t_vals, bias_vals+z_vals, '*', label='data') plt.legend() # Letting the inversion take into account the biases in the data results in a much smoother solution, although with larger error estimates, and less ability to recover short-term fluctuations in surface height # # Error estimates # The algorithm can optionally estimate errors for the _z0_ and _dz_ fields, and the biases. These error estimates depend on: # * the spatial and temporal distribution of the data, # * the error magnitude estimates, and # * the bias magnitude estimates. # # Let's look at the time-dependent solution from earlier, but remove the central portion to make a data gap. We're making the solution a bit wider in the y direction to allow us a bit of space to play with the model resolution. Note that the solution takes much longer when we calculate errors. 
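#
# As rough intuition for the cost: for a linear least-squares problem, the formal parameter errors come from inverting a matrix whose size scales with the number of model parameters, so finer grids make the calculation much more expensive. The toy sketch below uses dense linear algebra on a small made-up design matrix (not the sparse machinery LSsurf actually uses) just to show the quantity being computed.
# +
import numpy as np

n_data, n_params = 200, 20
G = np.random.randn(n_data, n_params)        # toy design matrix relating parameters to data
sigma_d = 1.0                                # assumed data error
R = 0.1 * np.eye(n_params)                   # toy regularization (the E_RMS constraints play this role)

# formal posterior covariance of the estimated parameters, and per-parameter errors
C_post = np.linalg.inv(G.T @ G / sigma_d**2 + R)
param_errors = np.sqrt(np.diag(C_post))
print(param_errors[:5])
# -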
# + data_dt_gap = data_dt[np.abs(data_dt.x) > 1000] W1={'x': 10000.0, 'y': 400, 't': 2} S=smooth_xytb_fit(data=data_dt_gap, ctr=ctr, W=W1, spacing=spacing, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=True, max_iterations=1, dzdt_lags=[1]) # find a row and a column in the center of the simulation r0_dz, c0_dz, e0_dz = np.round(np.array(S['m']['dz'].shape)/2).astype(int) r0_z0, c0_z0 = np.round(np.array(S['m']['z0'].shape)/2).astype(int) # + fig=plt.figure(11) ax=fig.add_subplot(221) # plot the z0 for the model hh=plt.plot(S['m']['z0'].x, S['m']['z0'].z0[r0,:], label='z0') # plot the errors in z0 ax.plot(S['m']['z0'].x, S['m']['z0'].z0[r0_z0,:]+S['E']['z0'].z0[r0_z0,:], '--', color=hh[0].get_color()) ax.plot(S['m']['z0'].x, S['m']['z0'].z0[r0_z0,:]-S['E']['z0'].z0[r0_z0,:], '--', color=hh[0].get_color()) ax.set_ylabel('z0') ax=fig.add_subplot(222) ax.plot(S['m']['z0'].x, S['E']['z0'].z0[r0_z0,:]) ax.set_ylabel('$\sigma_{z0}$') ax=fig.add_subplot(223) hh=ax.plot(S['m']['dz'].x, S['m']['dz'].dz[r0_dz, :, 0]) hh=ax.plot(S['m']['dz'].x, S['m']['dz'].dz[r0_dz, :, 0]- S['E']['dz'].dz[r0_dz, :, 0],'--', color=hh[0].get_color() ) hh=ax.plot(S['m']['dz'].x, S['m']['dz'].dz[r0_dz, :, 0]+ S['E']['dz'].dz[r0_dz, :, 0],'--', color=hh[0].get_color() ) ax.set_ylabel('dz, t=-1') ax=fig.add_subplot(224) ax.plot(S['m']['dz'].x, S['E']['dz'].dz[r0_z0,:, 0]) ax.set_ylabel('$\sigma_{dz, t=-1}$') fig.tight_layout() # - # We see that the error estimates are small where data are present, then increase across the data gap. Errors are also larger towards the edges of the grid, because farther from the center, fewer datapoints are available to constrain each grid point. # # This solution took a long time (and a lot of memory) to calculate, beacause the grids had fine resolution. Since we probably don't care if our error estimates have high or low resolution, we can speed up the calculation substantially by degrading the resolution of the error solution, then interpolating back to the original grid resolution. # # + # reduced-resolution fit: SE = smooth_xytb_fit(data=data_dt_gap, ctr=ctr, W=W1, spacing={'z0':100, 'dz':100, 'dt':0.25}, E_RMS=E_RMS, reference_epoch=4, N_subset=None, compute_E=True, max_iterations=1, dzdt_lags=[1]) # reduced-resolution sigma estimate, interpolated to full resolution sigma_i = SE['E']['z0'].interp(S['m']['z0'].x, S['m']['z0'].y, field='z0', gridded=True) plt.figure(12) plt.plot(S['m']['z0'].x, sigma_0.z0[r0_z0,:], label='full-res') plt.plot(S['m']['z0'].x, sigma_i[r0_z0,:], label='half-res') plt.set_ylabel('$\sigma_{z0}') plt.legend(); # - # The reduced-resolution sigma estimate is very close to the full-resolution estimate, but takes very much less time to calculate. # ## Quantities derived in error propagation # # The errors in derived quantities cannot be calculated without an estimate of the covariance structure of the fit model. Since we are not including the covariance matrix in the output products (because it's too large to even calculate in most cases!) 
we pre-calculate error estimates for some derived quantities: # # * _dz\_dt\_lag\_1_ : dz/dt from epoch to epoch # * _dz\_dt\_lag\_4_ : dz/dt calculated on an annual basis # * _dz\_bar_ : the error in the central 50% of the grid (typically 40x40 km) # * _dz\_dt\_bar\_lag\_1_ , _dz\_dt\_bar\_lag\_4_ , dz/dt averaged over the central portion of the grid, at quarter-annual and annual resolution # # For the central-average quantities, we also provide an value for the area of the central estimate (including the effects of projection distortion and the fraction of the central area that is included in the ice mask). # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #

# # Nikola GetsDrawn

# This is a python script to generate the website GetsDrawn. It takes data from /r/RedditGetsDrawn and makes something awesome. It uses the Nikola web framework in to make a website that is mobile friendly. # # The script has been rewritten several times and developed over time # # The first script for rgdsnatch was written after I got banned from posting my artwork on /r/RedditGetsDrawn. The plan was to create a new site that displayed stuff from /r/RedditGetsDrawn. # # Currently it only displays the most recent 25 items on redditgetsdrawn. The script looks at the newest 25 reference photos on RedditGetsDrawn. It focuses only on jpeg/png images and ignores and links to none .jpg or .png ending files. # It is needed to instead of ignoring them files - get the image or images in some cases, from the link. # The photos are always submitted from imgur. # Still filter out the i.imgur files, but take the links and filter them through a python imgur module returning the .jpeg or .png files. # # # This is moving forward from rgdsnatch.py because I am stuck on it. # # TODO # # Fix the links that don't link to png/jpeg and link to webaddress. # Needs to get the images that are at that web address and embed them. # # Display artwork submitted under the images. # # Upload artwork to user. Sends them a message on redditgetsdrawn with links. # # More pandas # # Saves reference images to imgs/year/month/day/reference/username-reference.png # # Saves art images to imgs/year/month/day/art/username-line-bw-colour.png # # Creates index.html file with: # Title of site and logo: GetsDrawn # Last updated date and time. # # Path of image file /imgs/year/month/day/username-reference.png. # (This needs changed to just their username). # # Save off .meta data from reddit of each photo, saving it to reference folder. # username-yrmnthday.meta - contains info such as author, title, upvotes, downvotes. # Currently saving .meta files to a meta folder - along side art and reference. # # Folder sorting system of files. # websitename/index.html-style.css-imgs/YEAR(15)-MONTH(2)-DAY(4)/art-reference-meta # Inside art folder # Currently it generates USERNAME-line/bw/colour.png 50/50 white files. Maybe should be getting art replies from reddit? # # Inside reference folder # Reference fold is working decent. # it creates USERNAME-reference.png / jpeg files. # # Currently saves username-line-bw-colour.png to imgs folder. Instead get it to save to imgs/year/month/day/usernames.png. # Script checks the year/month/day and if folder isnt created, it creates it. If folder is there, exit. # Maybe get the reference image and save it with the line/bw/color.pngs # # The script now filters the jpeg and png image and skips links to imgur pages. This needs to be fixed by getting the images from the imgur pages. # It renames the image files to the redditor username followed by a -reference tag (and ending with png of course). # It opens these files up with PIL and checks the sizes. # It needs to resize the images that are larger than 800px to 800px. # These images need to be linked in the index.html instead of the imgur altenatives. # # Instead of the jpeg/png files on imgur they are downloaded to the server with this script. # # Filter through as images are getting downloaded and if it has been less than certain time or if the image has been submitted before # # Extending the subreddits it gets data from to cycle though a list, run script though list of subreddits. 
# # Browse certain days - Current day by default but option to scroll through other days. # # Filters - male/female/animals/couples etc # Function that returns only male portraits. # tags to add to photos. # Filter images with tags # # # import os import requests import re import json import time from time import gmtime, strftime #import nose import arrow import shutil import subprocess import getpass from google.cloud import translate import pygcloud # + #url = 'https://www.reddit.com/r/redditgetsdrawn.json' #headers = { # 'User-Agent': '', # This is another valid field #} #response = requests.get(url, headers=headers) # - def getreference(subreddit): url = 'https://www.reddit.com/r/{}.json'.format(subreddit) headers = {'User-Agent': ''} # This is another valid field return(requests.get(url, headers=headers).json()) subjs = getreference('redditgetsdrawn') def lenrefernce(): return(len(subjs['data']['children'])) def listgallery(ldir): return(os.listdir(ldir + '/galleries')) # + #listgallery('/mnt/c/Users/luke/Documents/zh-artctrl') # + #totart = len(resjs['data']['children']) # + #totart # + #resjs['data']['children'][5]['data']['id'] # - allimg = os.listdir('/mnt/c/Users/luke/Documents/zh-artctrl/galleries') os.chdir('/mnt/c/Users/luke/Documents/zh-artctrl/galleries') #datet = arrow.now(resjs['data']['children'][tota]['data']['created']) # + #from google.cloud import translate # Instantiates a client #translate_client = translate.Client() # The text to translate #text = u'Hello, world!' # The target language #target = 'zh_cn' # Translates some text into Russian #translation = translate_client.translate( # text, # target_language=target) #print(u'Text: {}'.format(text)) #print(u'Translation: {}'.format(translation['translatedText'])) # - os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/wcmckee/test.json" # + #hanjs = requests.get('https://glosbe.com/transliteration/api?from=Han&dest=Latin&text=我&format=json') #pingzh = hanjs.json() #print(pingzh['text']) # + def downloadimgs(wotsubred, ldir): getreference(wotsubred) resjs = subjs #subjs os.chdir(ldir) for tota in range(0, lenrefernce()): if '.jpg' or '.png' in (resjs['data']['children'][tota]['data']['url']): idpost = resjs['data']['children'][tota]['data']['id'] authinfo = (resjs['data']['children'][tota]['data']['author']) datet = arrow.get(resjs['data']['children'][tota]['data']['created']) translate_client = translate.Client() transzh = translate_client.translate(resjs['data']['children'][tota]['data']['title'], target_language='zh_cn') hanjs = requests.get('https://glosbe.com/transliteration/api?from=Han&dest=Latin&text={}&format=json'.format(transzh['translatedText'])) pingzh = hanjs.json() #print(pingzh['text']) #if authinfo + '.png' not in listgallery(ldir): #print('image not there') subprocess.call('wget -O galleries/{}.png {}'.format(idpost, resjs['data']['children'][tota]['data']['url']), shell=True) with open(ldir + '/posts/' + authinfo + '.meta', 'w') as aupos: aupos.write('.. title: {}\n.. slug: {}\n.. date: {}\n.. tags: tagsz\n.. link:\n.. description:\n.. 
type: text'.format(idpost, idpost, datet)) with open(ldir + '/posts/' + authinfo + '.md', 'w') as aumeta: aumeta.write('# {}\n\n![{}]({})\n\n{}'.format(authinfo, idpost, '/galleries/' + idpost + '.png', resjs['data']['children'][tota]['data']['title'] + '\n\n' + transzh['translatedText'] + '\n\n' + pingzh['text'])) #else: # pass #lisnum = (os.listdir(ldir).count(authinfo + '.png')) #print(type(lisnum)) #lisnew = lisnum + 1 #subprocess.call('wget -O {}{}.png {}'.format(authinfo, lisnew, resjs['data']['children'][tota]['data']['url']), shell=True) #with open(ldir + '/posts/' + authinfo + str(lisnew) + '.meta', 'w') as aupos: # aupos.write('.. title: {}\n.. slug: {}\n.. date: {}\n.. tags: tagsz\n.. link:\n.. description:\n.. type: text'.format(authinfo, authinfo, datet)) #with open(ldir + '/posts/' + authinfo + str(lisnew) + '.md', 'w') as aumeta: # aumeta.write('![{}]({})\n\n{}'.format(authinfo, '/galleries/' + authinfo + str(lisnew) + '.png', resjs['data']['children'][15]['data']['title'])) # - downloadimgs('redditgetsdrawnbadly', '/mnt/c/Users/luke/Documents/zh-artctrl') # + #pygcloud.createbucket('getsdrawn') # + #pygcloud.makebucketpublic('lionsheart') # - def deploysite(blogdir, syncname): os.chdir(blogdir) subprocess.call('nikola build', shell=True) pygcloud.makebucketsync(syncname, blogdir + '/output') deploysite('/mnt/c/Users/luke/Documents/zh-artctrl', 'getsdrawn') def submitart(blogdir, idreference): os.chdir(blogdir) for root, dirs, files in os.walk(blogdir + '/galleries/', topdown = False): for name in files: if idreference in name: print(os.path.join(root, name)) #for name in dirs: # + #imagefile = submitart('/mnt/c/Users/luke/Documents/zh-artctrl', 'aco1f3') # - from PIL import Image import shutil # + def createart(blogdir, idreference, saveloc): img = Image.open(blogdir + '/galleries/' + idreference + '.png') basewidth = 400 wpercent = (basewidth/float(img.size[0])) hsize = int((float(img.size[1])*float(wpercent))) img = img.resize((basewidth,hsize), Image.ANTIALIAS) #im = Image.new("RGB", (4960, 3508), "white") newImage = Image.new('RGB', (4960, 3508), 'WHITE') newImage.paste(img) newImage.save(saveloc + '/' + idreference + '-reference.png') # - def batchcreateart(blogdir): idref = os.listdir(blogdir + '/galleries/') for idr in idref: createart(blogdir, idr.replace('.png', ''), '/home/wcmckee/imgtest/') batchcreateart('/mnt/c/Users/luke/Documents/zh-artctrl') def chopart(blogdir, idrefernce, saveloc): #removes the reference image from finished artwork by croping the left of image by 200 px img = Image.open(blogdir + '/galleries/' + idrefernce + '-reference.png') img.crop((400,0,0,0)) img.save(saveloc + '/' + idrefernce + '-final.png') createart('/mnt/c/Users/luke/Documents/zh-artctrl', 'acwwcp') chopart('/mnt/c/Users/luke/Documents/zh-artctrl', 'acwwcp') from IPython.display import Image as imgdisplay # + #imgdisplay('/mnt/c/Users/luke/Documents/zh-artctrl/galleries/aco1f3.png') # - # + from PIL import Image # Create an Image object from an Image imageObject = Image.open('/mnt/c/Users/luke/Documents/zh-artctrl/galleries/art_control-acvivp.png') # Crop the iceberg portion cropped = imageObject.crop((200,0,0,0)) # Display the cropped portion cropped.show() # - cropped.save('crop.png') imgdisplay(filename='crop.png') def copytousb(blogdir, idreference, artistname, copyto): shutil.copyfile(blogdir + '/galleries/' + artistname + '-' + idreference + '.png', copyto + artistname + '-' + idreference + '.png') # + #copytousb('/mnt/c/Users/luke/Documents/zh-artctrl', 'acvivp', 
'art_control', '/mnt/f') # - imgdisplay(filename='/mnt/f/art_control-acvivp.png') # + ##createart('/mnt/c/Users/luke/Documents/zh-artctrl', 'acvivp', 'art_control') # + #imgdisplay(filename='/mnt/c/Users/luke/Documents/zh-artctrl/galleries/art_control-acvivp.png') # - img = Image.open('/mnt/c/Users/luke/Documents/zh-artctrl/galleries/aco1f3.png') basewidth = 200 wpercent = (basewidth/float(img.size[0])) hsize = int((float(img.size[1])*float(wpercent))) img = img.resize((basewidth,hsize), Image.ANTIALIAS) img.save('test.png') im = Image.new("RGB", (2000, 1000), "white") im.size newImage.size newImage = Image.new('RGB', (800, 600), 'WHITE') newImage.paste(img) newImage.save('img.png') # + #imgdisplay(filename='img.png') # - # + #from IPython.display import Image as imgdisplay #imgdisplay(filename='test.png') # + from PIL import Image basewidth = 300 img = Image.open(imagefile) wpercent = (basewidth/float(img.size[0])) hsize = int((float(img.size[1])*float(wpercent))) img = img.resize((basewidth,hsize), Image.ANTIALIAS) img.save('test.png') # - # + #from codeq_nlp_api import CodeqClient #client = CodeqClient(user_id="", user_key="") #text = "We’d love to see your take on our little zoo family!" #document = client.analyze(text) #for sentence in document.sentences: # print(sentence.sentiment) # + ##for sentence in document.sentences: # print(sentence.emotions) # + ##for sentence in document.sentences: # print(sentence.to_dict()) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline # # # Extracting the time series of activations in a label # # # We first apply a dSPM inverse operator to get signed activations # in a label (with positive and negative values) and we then # compare different strategies to average the times series # in a label. We compare a simple average, with an averaging # using the dipoles normal (flip mode) and then a PCA, # also using a sign flip. 
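#
# To see why a sign flip matters before averaging, the short self-contained sketch below builds a toy label in which roughly half of the sources have inverted polarity (as can happen when dipole normals point in opposite directions across a sulcus): the plain mean nearly cancels, while flipping signs first preserves the shared waveform. This is only a conceptual illustration with made-up data, not MNE's implementation of mean_flip.
# +
import numpy as np

rng_toy = np.random.RandomState(0)
times_toy = np.linspace(0., 0.5, 200)
waveform = np.sin(2 * np.pi * 10 * times_toy)              # activity shared by all toy sources

signs = np.where(rng_toy.rand(40) < 0.5, 1., -1.)          # toy polarity of each source (stand-in for its normal)
data_toy = signs[:, None] * waveform + 0.1 * rng_toy.randn(40, len(times_toy))

plain_mean = data_toy.mean(axis=0)                         # opposite polarities cancel -> small amplitude
flipped_mean = (signs[:, None] * data_toy).mean(axis=0)    # flip first -> waveform is preserved

print('plain mean peak: %.3f, flipped mean peak: %.3f'
      % (np.abs(plain_mean).max(), np.abs(flipped_mean).max()))
# -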
# # # + # Author: <> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, apply_inverse print(__doc__) data_path = sample.data_path() label = 'Aud-lh' label_fname = data_path + '/MEG/sample/labels/%s.label' % label fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' snr = 3.0 lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) src = inverse_operator['src'] # Compute inverse solution pick_ori = "normal" # Get signed values to see the effect of sign filp stc = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori=pick_ori) label = mne.read_label(label_fname) stc_label = stc.in_label(label) mean = stc.extract_label_time_course(label, src, mode='mean') mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip') pca = stc.extract_label_time_course(label, src, mode='pca_flip') print("Number of vertices : %d" % len(stc_label.data)) # View source activations plt.figure() plt.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5) h0, = plt.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3) h1, = plt.plot(1e3 * stc_label.times, mean_flip.T, 'g', linewidth=3) h2, = plt.plot(1e3 * stc_label.times, pca.T, 'b', linewidth=3) plt.legend([h0, h1, h2], ['mean', 'mean flip', 'PCA flip']) plt.xlabel('Time (ms)') plt.ylabel('Source amplitude') plt.title('Activations in Label : %s' % label) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # callbacks # Type an introduction of the package here. 
# + hide_input=true from fastai.gen_doc.nbdoc import * from fastai.callbacks import * # + hide_input=true show_doc(F) # - # `F` # + hide_input=true show_doc(PIL) # - # `PIL` # + hide_input=true show_doc(abc) # - # `abc` # + hide_input=true show_doc(collections) # - # `collections` # + hide_input=true show_doc(csv) # - # `csv` # + hide_input=true show_doc(fp16) # - # `fp16` # + hide_input=true show_doc(general_sched) # - # `general_sched` # + hide_input=true show_doc(gzip) # - # `gzip` # + hide_input=true show_doc(hashlib) # - # `hashlib` # + hide_input=true show_doc(hooks) # - # `hooks` # + hide_input=true show_doc(html) # - # `html` # + hide_input=true show_doc(inspect) # - # `inspect` # + hide_input=true show_doc(lr_finder) # - # `lr_finder` # + hide_input=true show_doc(math) # - # `math` # + hide_input=true show_doc(mimetypes) # - # `mimetypes` # + hide_input=true show_doc(mixup) # - # `mixup` # + hide_input=true show_doc(nn) # - # `nn` # + hide_input=true show_doc(np) # - # `np` # + hide_input=true show_doc(one_cycle) # - # `one_cycle` # + hide_input=true show_doc(operator) # - # `operator` # + hide_input=true show_doc(optim) # - # `optim` # + hide_input=true show_doc(os) # - # `os` # + hide_input=true show_doc(patches) # - # `patches` # + hide_input=true show_doc(patheffects) # - # `patheffects` # + hide_input=true show_doc(pd) # - # `pd` # + hide_input=true show_doc(pickle) # - # `pickle` # + hide_input=true show_doc(plt) # - # `plt` # + hide_input=true show_doc(random) # - # `random` # + hide_input=true show_doc(re) # - # `re` # + hide_input=true show_doc(rnn) # - # `rnn` # + hide_input=true show_doc(scipy) # - # `scipy` # + hide_input=true show_doc(shutil) # - # `shutil` # + hide_input=true show_doc(spacy) # - # `spacy` # + hide_input=true show_doc(torch) # - # `torch` # + hide_input=true show_doc(tvm) # - # `tvm` # + hide_input=true show_doc(typing) # - # `typing` # + hide_input=true show_doc(warnings) # - # `warnings` # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="oo4hlNqu43zp" # #Homework 6: Clustering (K-means, DP-means) # by and # # **Due date**: March 24, Wednesday by 11:59pm # # **Late** due date: March 27, Saturday by 11:59pm # # This homework has a total of 100 points, with a 10-point bonus question at the end. **Online code along with library functions for K-means or DP-means are prohibited.** # # For all the functions you need to implement, feel free to change their signatures (inputs and returns) but do NOT change their names. # + [markdown] id="jIr7WQmm43zt" # ##**Question 1:** K-means (35 points) # # Here you will apply K-means on simple Gaussian data. You are given some code to assist you in implementing this clustering method. # + [markdown] id="8MVORa2A43zu" # ###**a.** Create a dataset and plot it (10 points) # Generate $3$ $2$D Gaussian clusters of data with the following means and covariances: # # $\boldsymbol{\mu}_0 = [2,2]^T, \boldsymbol{\mu}_1 = [-2,2]^T, \boldsymbol{\mu}_2 = [0,-2]^T$, # # $\Sigma_0 = [[0.02,0];[0,0.02]]$, $\Sigma_1 = [[0.2,0];[0,0.2]]$, $\Sigma_2 = [[0.05,0];[0,0.05]]$ # # Let each cluster of data have $50$ points. Complete the scatter_by_category function and use it create a colored scatter plot of the generated Gaussian data. 
# # **Important:** Assume **row** vectors for feature vectors and **column** vectors for mean vectors for the **entire** homework 6. This is a convention that makes matrix multiplications and dot products easier. # # Hints: # 1. Use rng.multivariate_normal() to generate the dataset. # 2. Use the imported seaborn library for plotting. Relevant documentation: https://seaborn.pydata.org/generated/seaborn.scatterplot.html # + id="NGS7AyWBY0R2" import numpy as np import pandas as pd # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import numexpr as ne from scipy import sparse from numba import njit import time # + colab={"base_uri": "https://localhost:8080/", "height": 337} id="iLqXr-qb43zu" outputId="3c5a9db8-9a0d-4669-cf9b-6e2a5956abf4" # Do NOT change the seed in any problem, unless you are explicitly allowed to # Use rng instances instead of using np.random directly, as recommended # by numpy manuals rng = np.random.default_rng(0) # Generate 2D Gaussian Data with 3 clusters of 3 different means # WRITE CODE HERE: # m is an optional argument for cluster means; when supplied, the function # should plot them in red # As mentioned, the means are column vectors, so m has shape (d, k), # where d is the number of features def scatter_by_category(X, labels, m=None): plt.figure(figsize=(5,5)) plt.xlim([-4, 3]) plt.ylim([-3, 4]) # WRITE CODE HERE: scatter_by_category(DATA, DATA_labels) # + [markdown] id="uTwm23dUdELr" # ###**b.** K-means objective (5 points) # Implement the K-means objective (within-cluster sum of squares) function: $\sum\limits_{i=1}^k Var(C_i) = \sum\limits_{i=1}^k \sum\limits_{\mathbf x \in C_i} ||\mathbf x-\mathbf{\mu_i}||_2^2$ # # Note: we recommend that you either use njit or not use loops here. More note on njit can be found in question 3.b where a loop over the data is inevitable. # + id="WJAufnXpdE0B" @njit(fastmath=True,cache=True) def WCSS(X, labels, m): # WRITE CODE HERE: # + [markdown] id="SyA7SoGv43zw" # ###**c.** Implement K-means (20 points) # Implement K-means and apply it to the Gaussian data above. The code below contains comments of what needs to be coded up at a specific part of the code. Please write your code below these comments where necessary. # # Plot the created clusters per iteration in different colors. For convergence of the algorithm (stopping criterion), check whether the objective value becomes stationary (change less than a suitable threshold). Finally, print the final objective value. # # Notes: # 1. As always, try to use vectorized routines instead of loops whenever possible. This will be important in a later question. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="YEeEok_YFGBP" outputId="9673776c-1dee-4cbd-a9b3-4f0e9d9416d9" ## K-means implementation: # X is the points as row vectors, m is the initial centers as column vectors # maxT helps it terminate in case something goes wrong # Feel free to change the function signatures def K_means(X, m, maxT=10, threshold=.001, plots=False): (d, k), n = m.shape, X.shape[0] # WRITE CODE HERE: for t in range(maxT): # 1. Assignment step - Assign each sample to the nearest mean vector: # 2. Conditionally plot clustering results: if plots: scatter_by_category(X, labels, m) # 3. Mean updating - Update the cluster means # Note: be careful with empty clusters; you don't want division by 0 # If there are empty clusters, simply don't update their centers # 4. 
Check for convergence if 0 <= threshold: break # CHANGE THIS return labels, obj, t+1 # Do NOT change the seed in any problem, unless you are explicitly allowed to rng = np.random.default_rng(5) num_clusters = 3 m = np.asfortranarray(rng.random((2, num_clusters))) ne.evaluate('6*m - 3', out=m) # Magic formula ;) labels, obj, iters = k_means(DATA, m, plots=True) print('The algorithm took', iters, 'iterations to terminate.') print('WCSS:', obj) print(m) # + [markdown] id="L9zUlW4Sm_ib" # ##**Question 2:** The MNIST Dataset (25 points) # Here comes your first decently-sized dataset this semester, the MNIST Dataset. The dataset contains 70000 28x28 preprocessed images of handwritten didits. Before proceeding further, you should familiarize yourself with the dataset, and we have provided a few cells to help you. You can modify them as long as you don't mess up the dataset. # # Due to the large size of MNIST, the following cells might take a while to run, so be patient. # + colab={"base_uri": "https://localhost:8080/"} id="pJhSt35qmOLK" outputId="1f4aafd1-9057-492e-83e0-25e3923250f4" # Fetching the MNIST dataset takes a while # You only need to run this cell once per session start = time.process_time() from sklearn.datasets import fetch_openml X, y = fetch_openml('mnist_784', return_X_y=True, as_frame=False) print(time.process_time()-start, 'seconds') # + colab={"base_uri": "https://localhost:8080/", "height": 362} id="Ky7NAdWTQHOU" outputId="1502a37d-1099-4588-c07c-cb1823840d06" print(min(X[0]), max(X[0])) plt.imshow(X[0].reshape(28, 28), 'gray') plt.show() print('Fraction of X that is non-zero:', np.count_nonzero(X)/X.size) print(X.shape) print(X.nbytes, 'bytes') print('Are row elements contiguous?', X.data.c_contiguous) print(X.dtype) # + [markdown] id="hvV_x4EJQegC" # ###**a.** Preprocessing for speed (5 points) # Preprocessing is critical in ML for many reasons. One of them is that it can make the data more friendly to both your code and your hardware, potentially saving you tons of time. Based on the observations above, what things can you do to the MNIST data to speedup your algorithms without compromising the data? Explain why they are beneficial for speed for full credit. # # Notes: # 1. Although recommended, you do not have to apply all of your answers here in code. # 2. Assume your code will be run on typical modern CPU. # + colab={"base_uri": "https://localhost:8080/"} id="HLioIfSEHKdv" outputId="5534a5f8-ba04-44bb-8381-146de8dde86f" print(max(X[0])) # A hint to one of the things we did to X # + [markdown] id="Pof7JDXSG60w" # ###**b.** K-means on MNIST (10 points) # Now peform K-means on MNIST. We have provided the initializations for you, in the variable m2. This dataset will stress test your K-means implementation, so it's important that you use optimized routines. You can lower maxT if it takes too long, but no lower than 15. If your algorithm can't terminate in 60 seconds in our testing, there will be some penalty. If you really have trouble getting it to terminate in a reasonable amount of time, take a subset of X by performing something like Xs = X[:10000], but there will be some penalty. # # Hints: # 1. Avoid loops that iterate over the dataset. If you really hav issue vectorizing, try to use the nopython JIT compiler (njit) from Numba to speed up your loops. See Question 3 code comments for more details on njit. This is not recommeneded however, as writing a jitted function usually requires more work than simply finding vectorized equivalents. 
You are also unlikely to match the speed of Numpy's vectorized routines this way. # 2. $||\mathbf a - \mathbf b||_2^2 = \mathbf{a \cdot a} + \mathbf{b \cdot b} - 2\mathbf{a \cdot b}$ # 3. $\underset{\mathbf b \in \mathbf B}{\arg\min} ||\mathbf a - \mathbf b||_2^2 = \underset{\mathbf b \in \mathbf B}{\arg\min}~\mathbf{b \cdot b} - 2\mathbf{a \cdot b}$ # + colab={"base_uri": "https://localhost:8080/"} id="kJV4Im7qYit_" outputId="804dafdf-1b79-4971-95d7-ea7b537ac022" # Definitely do NOT try naive printing/plotting this dataset # Do NOT set plots=True when calling k_means rng = np.random.default_rng(70) # USE THESE parameters; note the names are different from those # in question 1 to prevent accidental plotting m2 = np.asfortranarray(rng.random((784, 10), np.float32)) m2/= 4 maxT = 30 threshold = 16 start = time.process_time() # Run K-means on MNIST HERE: print(time.process_time()-start, 'seconds') print('The algorithm took', iters, 'iterations to terminate.') print('WCSS:', obj) # + [markdown] id="u1uVLyzzPHC2" # ###**c.** Show the final centers as images (5 points) # Hint: use subplots # # + colab={"base_uri": "https://localhost:8080/", "height": 313} id="eFhUNdHCVzaQ" outputId="5f1f8a60-2469-4f94-b8f9-71d08ef6685b" # WRITE CODE HERE: figsize=(12, 5) # + [markdown] id="GlvkcnA7Pe6B" # ###**d.** Avoiding bad initializations (5 points) # A simple way to mitigate the risk of bad initializations is to use multiple random ones to obain multiple solutions, compare them, and keep the best one according to an objective function. Run K-means on MNIST 3 more times, each with a different initialization, and keep only the current best solution. Finally, show the best solution's centers as images and report WCSS. # # Note: # 1. there is no time limit for this part, but you need to run your code to generate your own images. The run time shown in the preview is just for reference. # 2. Use initializations methods analogous to the one in part b if you want to get the same results as shown. # + colab={"base_uri": "https://localhost:8080/"} id="8RbkeoVzC1iM" outputId="24782fc3-e4f7-41c6-b5a3-0ee746ed016a" start = time.process_time() for i in range(3): m_tmp = np.asfortranarray(rng.random((784, 10), np.float32)) m_tmp/= 4 labels_tmp, obj_tmp, iters = k_means(X, m_tmp, 30, 16, Xs=Xs, X2=X2) if obj_tmp < obj: obj, m2 = obj_tmp, m_tmp print(time.process_time()-start, 'seconds') print('WCSS:', obj) # The lowest WCSS from the four solutions # + colab={"base_uri": "https://localhost:8080/", "height": 313} id="Ik0cr5wAYkNo" outputId="1571cc7f-9221-4c82-d197-dc4d24f20835" # WRITE CODE HERE: figsize=(12, 5) # + [markdown] id="e0N9X6iA43zx" # ##**Question 3:** DP-means (40 points) # # Here, you will implement DP-means and test it out on the dataset from question 1. # # # + [markdown] id="TrlNsiJB43zx" # ###**a.** The role of $\lambda$ (5 points) # Describe what the $\lambda$ parameter does in the DP-means algorithm discussed in class. # + [markdown] id="bkh8Rd3y43zx" # # + [markdown] id="uENwG6Zs43zz" # ###**b.** Implement DP-means (25 points) # Implement DP-means and apply it to the 2D Gaussian data from question 1. The code below contains comments of what needs to be coded up at a specific part of the code. Please write your code below these comments where necessary. # # Plot the created clusters per iteration in different colors. Similar to K-means, check how much the objecive value changed to determine convergence. 
The DP-means objective is given by $\lambda k+ \sum\limits_{i=1}^k Var(C_i)$ # # Use $\lambda$ = 4 and plots=True so your function produce plots over the iterations. # + id="pi7vvo1iVh5E" def DP_means_obj(X, m, Lambda, labels, X2, m2): # WRITE CODE HERE: ## DP-means sub-routine: # Remove @njit if you have issues with it and don't want to do the bonus # question. Numba can speedup for loops and Numpy sigificantly, but it often # has issues with python lists, scipy, pyplot, etc. It should be able to handle # everything you need to do in onepass. Numba really prefers row vectors, # so passing in m.T instead of m is highly recommended. As always, you are # welcome to change te function signature as long as you make it work. @njit(fastmath=True, cache=True) def onepass(X, mT, Lambda, labels, X2, m2): # welcome to change the signature (k, d), n = mT.shape, X.shape[0] # WRITE CODE HERE: # Iterating over X is required by the DP-means algorithm for i in range(n): # 1. Calculate distance from current point to all centers # 2. Check how far the clostest center is; add the currect point as # a center if necessary (add it to mT) if 0 > Lambda: # CHANGE THIS pass else: # assign point to the closest center pass # m should be returned because the updates performed may not be in place return mT.T # reversing the transpose # + id="O15FHyzF7quq" ## DP-means main function: def DP_means(X, m, Lambda=2, maxT=10, threshold=.001, plots=False): (d, k), n = m.shape, X.shape[0] # WRITE CODE HERE: for t in range(maxT): # 1. Implement the onepass function and use it here # 2. Conditionally plot clustering results: if plots: scatter_by_category(X, labels, m) # 3. Mean updating - Update the cluster means # Similar to K-means, be careful about empty clusters # 4. Check for convergence based on how much k and m changed if False: # CHANGE THIS break return m, t+1 # + colab={"base_uri": "https://localhost:8080/", "height": 707} id="3Gvjfg2Q7hdP" outputId="fd825ab6-bee8-4f09-c6cc-f26cdf2004d5" rng = np.random.default_rng(0) m = np.asfortranarray(rng.random((2, 1))) # WRITE CODE HERE: print(m) print('Value of the objective:', obj) # + [markdown] id="XpWnlOxH7unT" # ###**c.** Reflection (4 points) # If your implementation runs correctly, you should see the solution having one more center than the "expected" 3. **Explain why this happens and suggest a simple, generalizable fix for this.** (generlizable means you can apply this fix easily on other datasets). Also demonstrate this fix in code for full credit. # + [markdown] id="rLph8NyQbdIa" # This is because the initial center is provided at random and is not close enough to any point. Choosing a random point from the dataset to serve as the initial center would solve this issue. # + colab={"base_uri": "https://localhost:8080/", "height": 723} id="ob8mNUcXodmM" outputId="99082d78-7e7e-496f-b8e8-b9a146522d94" # WRITE CODE HERE: print(m) print('The algorithm took', iters, 'iterations to terminate.') print('Value of the objective:', obj) # + [markdown] id="Ppy9jyH843z0" # ###**d.** Try different $\lambda$ values (6 points) # Run DP-means with the following $\lambda$ values: # # $\underline{\lambda} = [1,9,16]$ # # For each $\lambda$ value, plot the resulting clusters and report the final objective value. # # Note: The solution shown is obtained without the expected fix in part c. 
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Buw3140OcmvU" outputId="da6870b3-fde6-4bb7-c14a-c152ca5b2f75" rng = np.random.default_rng(50) print('Lambda = 1') # WRITE CODE HERE: print('The algorithm took', iters, 'iterations to terminate.') print('Value of the objective:', obj) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="qedjA4sWgm_b" outputId="26bc03ad-e74e-447f-af3e-56b556572fa1" print('Lambda = 9') # WRITE CODE HERE: print(m) print('The algorithm took', iters, 'iterations to terminate.') print('Value of the objective:', obj) # + colab={"base_uri": "https://localhost:8080/", "height": 740} id="JQlLRgztguZS" outputId="6174ce89-e110-4e74-b4ef-5467fe142b33" print('Lambda = 16') # WRITE CODE HERE: print(m) print('The algorithm took', iters, 'iterations to terminate.') print('Value of the objective:', obj) # + [markdown] id="fEchEfeeyQo-" # ##**Question 4:** DP-means on MNIST (Bonus 10 points) # This extra credit is fairly open-ended, just get **your** DP-means implementation to run reasonably well on MNIST. # # You may run DP-means on a subset of MNIST, but the subset should contain at least 10000 samples. # # Aim to achieve the following (ranked by importance): # 1. Show the final centers as images. Your images need to look like those shown in the preview. # 2. Find a suitable $\lambda$ and initial m that produce 10 to 30 clusters. # 3. maxT must be at least 10. (Our solution uses 30) # 4. The run time should be within 60 seconds. # # Notes: # 1. $\lambda$ needs to be relatively large. It's better to start high and then decrease to find an appropriate value. # 2. All solution previews in this homework are generated from running on the entire dataset. # + colab={"base_uri": "https://localhost:8080/"} id="tVkK_EITCBnI" outputId="7b556c5a-d3dd-4682-acc3-a6bf700438ff" # Initialize HERE: start = time.process_time() # Run DP-means on MNIST HERE: print(time.process_time() - start, 'seconds') print('Shape of the means matrix:', m2.shape) print('The algorithm took', iters, 'iterations to terminate.') print('Value of the objective:', obj) # + colab={"base_uri": "https://localhost:8080/", "height": 485} id="im5l4MWmer8e" outputId="8d668af7-b54f-423d-8a97-b455247a28f2" # WRITE CODE HERE: figsize=(10, 8) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.4 64-bit # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import matplotlib.pyplot as plt import os from tqdm import tqdm tqdm.pandas() from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.svm import SVR from sklearn.model_selection import RandomizedSearchCV,GridSearchCV # - train_df=pd.read_csv('./Data/train.csv') train_df.head() columns=[f'ROI{i}-ROI{j}' for i in range(1,116) for j in range (i+1,117) ] def get_corr(row,test=False): if test: brain = np.array(row[4:]) else: brain = np.array(row[5:]) brain = brain.reshape((116, 115)).astype(float) corr = np.corrcoef(brain) flat = corr[np.triu_indices(corr.shape[0], 1)] return flat.tolist() corr=train_df.progress_apply(lambda x :get_corr(x),axis=1) # + corr_df=pd.DataFrame(np.array(corr.values.tolist()),columns=columns).fillna(0) # - corr_df # + #normalized_corr_df=(corr_df-corr_df.mean())/corr_df.std() # + # normalized_corr_df=(corr_df-corr_df.min())/(corr_df.max()-corr_df.min()) # - new_train=corr_df #new_train['var1']=train_df['var1'] #new_train['y']=train_df['y'] first_cols = train_df.iloc[:, [1,2,3,4]] new_train = first_cols.join(corr_df) new_train['var2'].replace({'A':0, 'C':1}, inplace = True) new_train['var3'].replace({'A':0, 'C':1}, inplace = True) new_train new_train['mean']=np.mean(new_train.iloc[:,4:],axis=1) new_train['max']=np.max(new_train.iloc[:,4:],axis=1) new_train['min']=np.min(new_train.iloc[:,4:],axis=1) new_train new_train.groupby('y').size().sort_values(ascending=False) new_train=new_train.groupby('y').mean().reset_index() new_train # # Train # ## Linear Regession reg = LinearRegression() X_train, X_test, y_train, y_test = train_test_split(new_train.drop(columns='y'), new_train['y'], test_size=0.25, random_state=42) reg.fit(X_train,y_train) y_pred=reg.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) # ## SVR params = [{'kernel' : ['poly'], 'C' : [1,5,10], 'degree' : [1,2,3,4]}] #'coef0' : [0.01,10,0.5], #'gamma' : ('auto','scale')}] svr_reg=SVR() grids = GridSearchCV(svr_reg,params,cv=3,verbose=5,n_jobs=-1) grids.fit(X_train,y_train) grids.best_params_ y_pred=grids.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) #retrain on all dataset svr_reg=SVR(C=1, degree= 1, kernel= 'poly') svr_reg.fit(new_train.drop(columns='y'),new_train['y']) plt.scatter(y_test,y_test-y_pred,) y_pred=svr_reg.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) svr_reg_poly=SVR(kernel='poly',degree=2) svr_reg_poly.fit(X_train,y_train) y_pred=svr_reg_poly.predict(X_test) print(mean_squared_error(y_test, y_pred,squared=False)) svr_reg_poly.fit(new_train.drop(columns='y'),new_train['y']) # ## K-neigbors regression from sklearn.neighbors import KNeighborsRegressor for k in range(30,45): neigh = KNeighborsRegressor(n_neighbors=k,weights='distance').fit(X_train, y_train) y_pred=neigh.predict(X_test) print(k,mean_squared_error(y_test, y_pred,squared=False)) neigh = KNeighborsRegressor(n_neighbors=33,weights='distance',p=2).fit(X_train, y_train) y_pred=neigh.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) neigh = KNeighborsRegressor(n_neighbors=33,weights='distance',p=2).fit(new_train.drop(columns='y'),new_train['y']) y_pred=neigh.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) # ## Elastic net # + from sklearn.linear_model import ElasticNetCV regr = ElasticNetCV(cv=3, random_state=0) regr.fit(X_train,y_train) # - y_pred=regr.predict(X_test) mean_squared_error(y_test, y_pred,squared=False) # # Test 
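# + [markdown]
# A sketch of the idea behind this section (added for clarity; `build_test_features` is an illustrative
# helper, not part of the original notebook): the held-out rows must pass through exactly the same
# feature pipeline as the training rows - correlation features from `get_corr` (with `test=True`, since
# the test file has one fewer leading column), the A/C encodings for `var2`/`var3`, and the row-wise
# mean/max/min aggregates.

# +
def build_test_features(df):
    corr_feats = df.progress_apply(lambda row: get_corr(row, test=True), axis=1)
    feats = pd.DataFrame(np.array(corr_feats.values.tolist()), columns=columns).fillna(0)
    out = df.iloc[:, [1, 2, 3]].join(feats)
    out['var2'] = out['var2'].replace({'A': 0, 'C': 1})
    out['var3'] = out['var3'].replace({'A': 0, 'C': 1})
    out['mean'] = np.mean(out.iloc[:, 3:], axis=1)
    out['max'] = np.max(out.iloc[:, 3:], axis=1)
    out['min'] = np.min(out.iloc[:, 3:], axis=1)
    return out
# -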
test_df=pd.read_csv('./Data/test.csv') test_df corr_test=test_df.progress_apply(lambda row:get_corr(row,test=True),axis=1) test_final_df=pd.DataFrame(np.array(corr_test.values.tolist()),columns=columns).fillna(0) # + #normalized_corr_df_test=(test_final_df-test_final_df.mean())/test_final_df.std() # - test_final_df test_df first_cols_test = test_df.iloc[:, [1,2,3]] new_test = first_cols_test.join(normalized_corr_df_test) new_test['var2'].replace({'A':0, 'C':1}, inplace = True) new_test['var3'].replace({'A':0, 'C':1}, inplace = True) new_test new_test['mean']=np.mean(new_test.iloc[:,3:],axis=1) new_test['min']=np.min(new_test.iloc[:,4:],axis=1) new_test['max']=np.max(new_test.iloc[:,4:],axis=1) new_test y_pred_test=svr_reg.predict(new_test) plt.hist(y_pred_test) df_leaderboard=pd.DataFrame({'id':test_df['id'],'target':y_pred_test}) df_leaderboard df_leaderboard df_leaderboard.to_csv('G14_08',index=False) tuning_params=[{'n_neighbors':[range(1,45,2)], 'weights':['uniform','distance'], 'p':[1,2,4,6]}] neigh_r=NearestNeighbors() grids = GridSearchCV(neigh_r,tuning_params,cv=3,verbose=5,n_jobs=-1) grids.fit(X_train,y_train) grids.best_params_ y_test # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] toc=true #

# Table of Contents
# - # # 4 You are given two random variables X and Y. # - E(X) = 0.5, Var(X) = 2 # - E(Y) = 7, Var(Y) = 3.5 # - cov (X, Y) = -0.8 # # Find the variance of the random variable Z = 2X - 3Y. # $(1)\operatorname {Var}(Z)=\operatorname {E}(Z^2)-\operatorname {E}(Z)^2$ # first term: # # $\operatorname {E}(Z)=\operatorname {E}[(2X-3Y)]$ = # $\operatorname {E}(2X)-\operatorname {E}(3Y)$ = # # $2\operatorname {E}(X)-3\operatorname {E}(Y)$ = $2*0.5-3*7 = -20$ # Second term: # # $\operatorname {E}(Z^2)=\operatorname {E}[(2X-3Y)^2]$ # # $\operatorname {E}(Z^2)=\operatorname {E}[4X^2-12XY+9Y^2]$ # # $\operatorname {E}(Z^2)=\operatorname {E}(4X^2)-\operatorname {E}(12XY)+\operatorname {E}(9Y^2)$ # # $\operatorname {E}(Z^2)=4\operatorname {E}(X^2)-12\operatorname {E}(XY)+9\operatorname {E}(Y^2)$ # From variance and covariance definitions: # # $\operatorname {E}(X^2) = \operatorname {Var}(X)+\operatorname {E}(X)^2 = 2+0.25 = 2.25$ # # $\operatorname {E}(Y^2) = \operatorname {Var}(Y)+\operatorname {E}(Y)^2 = 3.5+49 = 52.5$ # # $\operatorname {E}(XY) = \operatorname {cov}(X,Y)+\operatorname {E}(X)\operatorname {E}(Y) = -0.8+0.5*7 = 2.7$ # Therefore: # $\operatorname {E}(Z^2)=4*2.25-12*2.7+9*52.5=449.1$ # take equation (1): # # $(1)\operatorname {Var}(Z)=\operatorname {E}(Z^2)-\operatorname {E}(Z)^2$ = # # **$449.1-(-20)^2 = 49.1$** # # Var(Z) = 49.1, which matches the direct formula $\operatorname{Var}(Z)=4\operatorname{Var}(X)+9\operatorname{Var}(Y)-12\operatorname{cov}(X,Y)=8+31.5+9.6=49.1$ # # # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Detecting oscillations & other temporal dynamics as null-hypothesis testing in spectral domain # # Existing (and one might say, the dominant) view of neural oscillation is that it is the default: magnetic & electrical recordings from the scalp and cortex are always composed of oscillations at every frequency, even if they are not readily visible in the time series or power spectrum. One important consequence is that effects of oscillations are for the most part constrained to measurement of relative power differences, either between experimental conditions, or between "oscillations" of different frequencies (e.g., bandratios). Another important consequence, of course, is the idea that neural dynamics is always oscillatory, and their visibility in the power spectrum depends on the synchrony of many sub-oscillators. # # Spectral parameterization (FOOOF) is the first step towards delineating between aperiodic and periodic components from the power spectral density (PSD). Here, we explore the concept that neural oscillations, and other meaningful events with temporal structure, are deviations from a baseline (or null hypothesis) composed purely of linear stochastic noise (i.e., colored noise). # %matplotlib inline import numpy as np from scipy import random, signal from statsmodels.tsa.stattools import acf import matplotlib.pyplot as plt from neurodsp import sim plt.rcParams['axes.spines.top']=False plt.rcParams['axes.spines.right']=False plt.rcParams['image.cmap'] = 'inferno' # + amp = 100 fs = 1000 T = 300 t = np.arange(0,T,1/fs) sig = random.randn(fs*T)*amp plt.figure(figsize=(12,3)) plt.plot(t, sig, 'k', alpha=0.5) plt.xlabel('time (s)');plt.ylabel('amplitude') plt.xlim([0,2]) # - # In the cell above, we've generated white noise from a normal distribution, sampled at 1000Hz, and of a certain amplitude.
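# + [markdown]
# Quick sanity check (added here as an illustration): the sample statistics of `sig` should match the
# generating parameters - mean near 0 and standard deviation near `amp`.

# +
print('sample mean: %.2f (expect ~0)' % sig.mean())
print('sample std:  %.2f (expect ~%d)' % (sig.std(), amp))
print('sample var:  %.1f (expect ~%d)' % (sig.var(), amp ** 2))
# -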
# # The main operation we'll be working with here is the conversion of the time series to a series of spectral estimates. We'll use short-time windowed Fourier Transform (STFT) here, but the ideas apply similarly to narrowband bandpass or wavelet transform and Hilbert Transformed data, with some caveats concerning filter bandwidth, etc. # # STFT converts the time series into a collection of Fourier coefficients over time (`t_stft`), at frequencies (`f_stft`) determined by the STFT window length (`npserg`). Crucially, these coefficients are complex, from which the amplitude/power and phase are typically computed. But we can also visualize the coefficents as complex numbers in the 2D complex plane. # # Below, we compute the STFT, and plot a single time-slice of Fourier coefficients (at time 0, corresponding to the signal from t=0 to t=1), in the complex plane, as well as its power and phase spectra. This is not directly what we want to investigate, but builds towards the theoretical point. nperseg=1000 f_stft, t_stft, stft = signal.stft(sig, fs, np.ones(nperseg), nperseg=nperseg, noverlap=0, boundary=None, return_onesided=False) # The STFT is performed above with some non-default parameter configurations, which are actually chosen to retrieve the unaltered per-window DFTs (otherwise, for example, applying a Hanning window alters some of the conserved quantities below). # + def plot_triplet(coefs, x_axis, x_label): plt.subplot(1,3,1) plt.scatter(coefs.real, coefs.imag, s=10, c=x_axis) plt.xlabel('real');plt.ylabel('imag'); plt.subplot(1,3,2) plt.plot(x_axis, np.absolute(coefs)**2, '.k') plt.xlabel(x_label);plt.ylabel('power'); plt.subplot(1,3,3) plt.plot(x_axis, np.angle(coefs), '.k') plt.xlabel(x_label);plt.ylabel('phase'); plt.tight_layout() # a single time-slice of STFT (one Fourier Transform) plt.figure(figsize=(12,4)) plot_triplet(stft[:,0], f_stft, 'frequency') # - # In the above visualization of _single time-slice of the STFT_, we can observe some of the properties of the Fourier Transform. Namely, it is a linear projection of the time series onto the set of complex DFT bases. Because the signal we've generated is Gaussian and random, linear projection preserves this property, turning the 1D-Gaussian vector (time series) into a vector of 2D complex Gaussian distribution. Each dot in the plots above is the Fourier coefficient/power/phase at a _single frequency_, and the first subplot is colored by the frequency. You can see that it is a Gaussian blob and independent of frequency (color is not correlated to 2D position). Similarly, power and phase are randomly distributed over frequencies (we'll get to the specifics later). # # Side note: because we've included the negative frequencies in the STFT computation above (`return_onesided=False`), the coefficients are symmetric about the real axis, i.e., power is symmetric around 0Hz, and phase is anti-symmetric. # # --- # More formally stated, the Fourier coefficients at any given frequency (f) is a complex random variable (RV) that follows a particular distribution. The plots above look at the coefficients of all frequencies at a single time-slice. We can also look at the coefficients of a single frequency, over all time slices. i_freq = 10 plt.figure(figsize=(12,4)) plot_triplet(stft[i_freq,:], t_stft, 'time (s)') # This plot looks similarly disordered as the one above, but is different in that it is aggregated across _time_, not _frequencies_ (hence losing the symmetry). 
However, the similarities reveal the key point: under the null hypothesis that the signal is linear stochastic noise, the coefficients at a single frequency across time is a R.V. that also follows the (complex) 2D Gaussian distribution. One can derive the appropriate distribution of power and phases accordingly. # # # # Criterion 1: univariate distributions # # For any given frequency, if $X(t)$ is the R.V. of complex coefficients, $P(t)$ the R.V. of power, and $\phi(t)$ the R.V. of phase: # # Null hypothesis 1: $X \sim norm(\begin{bmatrix}0 \\0 \end{bmatrix}, \begin{bmatrix}s^2 & 0\\0 & s^2\end{bmatrix})$ # # NH 2: $P \sim \chi^2(2) = exp(
\langle P \rangle)$ # # NH 3: $\phi \sim uniform(-\pi, \pi)$ # # In words, $X$ follows a 2D isotropic Gaussian distribution (real and imag axes are independent, i.e., circular blob). # # $P$, the signal power or squared magnitude, is the sum of squares of 2 independent Gaussian R.V., hence follows the scaled chi-square distribution with DOF k=2 (equivalent to the exponential distribution), and $\langle P \rangle$ is the average signal power (which defines the power spectrum), where $\langle P \rangle = 2s^2$ # # $\phi$ is the phase angle of the 2D Gaussian blob, and is therefore uniformly distributed from $[-\pi, \pi]$ # # Checkout the same exact concept, but [in a physical context](https://stats.stackexchange.com/questions/65640/how-to-calculate-2d-standard-deviation-with-0-mean-bounded-by-limits). # + print('2s^2 = %.4f, <P> = %.4f'%(np.std(stft[i_freq].real)**2+np.std(stft[i_freq].imag)**2, (np.absolute(stft[i_freq])**2).mean())) # a little off because we are computing s as the sample standard deviation and <P> as the sample average print('sum of empirical covariance matrix diagonals: %.4f'%np.diag(np.cov([stft[i_freq].real, stft[i_freq].imag])).sum()) # - # --- # ### A small detour on white noise # In general, and especially for neural signals, $\langle P \rangle$ and $s$ vary as a function of frequency. For the particular example of white noise, however, we can actually compute what s (and $\langle P \rangle$) should be based on the signal variance. First, per Parseval's Theorem, the sum of squares of the time series equals that of the Fourier Transform. Note that by default, scipy.stft normalizes the FT by nperseg to return the spectral _density_, i.e., preserving signal variance, not _sum of squares_. ss_ts = ((sig[:1000])**2).sum() ss_ft = (abs(stft[:,0])**2).sum() print('simulation signal variance: %.4f |variance from sum of squares: time series: %.4f, frequency spectrum: %.4f'%(amp**2, ss_ts/nperseg, ss_ft)) # For Gaussian noise with standard deviation `amp`, the signal variance is simply $var = amp^2$, which is the sum of squares divided by signal length. This is the total signal variance, which is then divided equally (for white noise only) across all frequencies. Therefore, for a DFT/STFT with `nfft = nperseg = 1000` (chosen by default), average power at each frequency is: $\langle P \rangle = \frac{amp^2}{nfft} = \frac{10000}{1000} = 10$. The power spectral density will be this number across all frequencies, with variation that decreases with increasing signal length. Then, $s = \sqrt{\frac{\langle P \rangle}{2}}$ psd = (abs(stft)**2).mean(1) plt.figure(figsize=(4,4)) plt.plot(f_stft, psd, 'k.', alpha=0.5) plt.xlabel('frequency (Hz)');plt.ylabel('PSD <P> (V^2/Hz)'); print('sqrt(<P>
    /2) = %.4f, s (real) = %.4f, s (imag) = %.4f'%((psd[i_freq]/2)**.5, np.std(stft[i_freq].real), np.std(stft[i_freq].imag))) # These numbers are all a little off because they are finite sample-estimates. # ### end of detour # ___ # # Criterion 2: dependence across frequencies # # Since $X(t)$ at each frequency is random variable, in the case of uncorrelated noise, there should be no correlation between $X_{f1}(t)$ and $X_{f2}(t)$, for any two frequencies $f_1, f_2$. This is similarly true for $P(t)$. Thus, we can compute the pairwise correlation matrix for power, and average phase difference, between frequencies across time. # # (__Note/to-do__: we can directly measure relationship between the 2D Gaussian RVs as well, but with a more complicated definition of covariance. In fact, operating directly on the complex numbers should detect amplitude-amplitude and phase-amplitude coupling "for free". In practice, we can implement complex covariance/correlation (which np.corrcoef automatically does), or coherence.) def compute_coh(stft, mag_normed=True): """ Compute the average vector difference between pairs of frequencies. The STFT is magnitude-normalized by default, such that the resulting output is a square matrix of complex numbers representing the average difference vector between pair-wise frequencies. The magnitude of the elements represents the degree of phase coherence, the phase represents the phase difference. When not normalized by magnitudes, the result is a magnitude-weighted coherence. Note that the resulting matrix is very similar to the complex covariance, except the cross terms are taken to be zero here. """ n_freqs = stft.shape[0] # compute the complex conjugate product between every pair of frequency if mag_normed: # normalize by magnitude to get unit-lengthed vectors return np.array([(s*(stft/abs(stft)).conj()).mean(1) for s in (stft/abs(stft))]) else: return np.array([(s*(stft).conj()).mean(1) for s in stft]) # + # just use the positive frequencies now that we've made the point about conservation of variance stft_pos = stft[f_stft>=0] f_stft_pos = f_stft[f_stft>=0] stft_magnormed = stft_pos/abs(stft_pos) # power correlation ft_power = abs(stft_pos)**2 # otherwise known as spectrogram corrmat_power = np.corrcoef(np.log10(ft_power)) # compute correlation matrix on log-power: more normally distributed # phase coherence across frequencies ft_coh = compute_coh(stft_pos) # + n_freqs = len(f_stft_pos) plt.figure(figsize=(20,4)) plt.subplot(1,5,1) # subtract out identity matrix so color is scaled better without diagonal 1s plt.imshow(corrmat_power-np.eye(ft_power.shape[0]), cmap='RdBu') plt.ylabel('frequency index') plt.colorbar(); plt.title('log-power correlation matrix') plt.subplot(1,5,2) plt.imshow(abs(ft_coh)-np.eye(ft_power.shape[0])) plt.colorbar(); plt.title('phase-coupling magnitude') plt.subplot(1,5,3) plt.imshow(np.angle(ft_coh)) plt.colorbar(); plt.title('average phase difference') plt.subplot(1,5,4) plt.hist(np.angle(ft_coh[np.triu_indices(n_freqs,1)]), np.linspace(-np.pi, np.pi, 100), color='k', alpha=0.5); plt.title('distribution of phase differences') plt.xticks([-np.pi, np.pi], [r'$-\pi$', r'$\pi$']) plt.subplot(1,5,5) # pick two frequencies to compute the complex conjugate product, and plot all as vectors vecdiff_exmp = stft_magnormed[i_freq]*stft_magnormed[i_freq+10].conj() plt.plot(vecdiff_exmp.real, vecdiff_exmp.imag, '.') plt.plot([0,0],[vecdiff_exmp.mean().real, vecdiff_exmp.mean().imag], '.-r') plt.title('magnitude-normed vector difference') 
plt.tight_layout() # - # The above plots demonstrate the lack of discernable relationships across frequencies in both power and phase, as expected for the null hypothesis: # 1. no correlation in power across frequencies # 2. no phase coupling across frequencies (max is 1) # 3. & 4. average phase difference between frequencies over time is also uniformly distributed, i.e., no phase delay preference. # # In fact, we can once again set null hypotheses as parametric distributions that the above quantities should follow, in the case of stochastic noise. For example, Pearson correlation of two logged-exponential variables ($log_{10}P_{f}(t)$) has some null distribution one can derive (I don't know what it is off the top of my head). # # For magnitude-normalized phase-coupling magnitude and phase difference: the complex conjugate product of the Fourier coefficients of a pair of frequencies is uniformly distributed around the unit circle across time (last subplot). Each pixel in the second and third subplot is the magnitude and the phase of the average vector in the fifth plot (red). # # ### __How should the average magnitude and phase difference be distributed?__ # --- # # Criterion 3: structure over time # # Lastly, because $X(t)$ at each frequency is a Gaussian R.V., there is no temporal autocorrelation at each frequency (this is only true if overlap between STFT windows is small, we used 0 above). As such, we can examine the power and phase autocorrelation/power spectrum, or the average phase advance (instantaneous frequency, e.g., see Fransen et al., 2015 NeuroImage), at each frequency. Similarly, we can look at the autocorrelation (lag-correlation) of the complex variables themselves, using the same definition of complex correlation. # # In general, though, since STFT is so sparsely sampled in time (every 0.5 or 1 second, whatever the step-size is), this will not find fast temporal structures when used with STFT-estimates, only long-term, potentially experimental trial-related temporal structures. Bandpass/wavelet-Hilbert spectral estimates will resolve finer-grain temporal autocorrelations. # # Moreover, research looking at long-range temporal correlation (LRTC) of neural oscillations commonly employ nonlinear measures such as detrended fluctuation analysis, which similarly measures fluctuation at different timescales. ft_acf = np.array([acf(np.log(p), nlags=20) for p in ft_power]) ft_phadv = (stft_magnormed[:,1:]*stft_magnormed[:,:-1].conj()) # + plt.figure(figsize=(12,4)) plt.subplot(1,3,1) plt.imshow(ft_acf[:,1:], aspect='auto') plt.xlabel('lag'); plt.ylabel('frequency'); plt.title('power autocorrelation') plt.subplot(1,3,2) plt.plot(f_stft_pos, abs(ft_phadv.mean(1)), '.') plt.xlabel('frequency (Hz)'); plt.title('phase advancement consistency'); plt.subplot(1,3,3) plt.plot(f_stft_pos, np.angle(ft_phadv.mean(1)), '.') plt.xlabel('frequency (Hz)'); plt.title('mean phase advancement'); plt.tight_layout() # - # As expected, there are no autocorrelation of spectral power at any frequency, and no consistent phase advancement at any frequency. # --- # # # Detecting oscillations # From this perspective, oscillations (or any other temporally structured events) can be defined as deviations from the above specified distributions under the null hypotheses, for the 3 criteria. 
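# + [markdown]
# As one concrete illustration of criterion 1 (a sketch added here, not part of the original analysis):
# under NH 2 the power at a single frequency should be exponentially distributed with mean equal to its
# average power, and a one-sample Kolmogorov-Smirnov test should fail to reject that null for this
# white-noise signal (note the scale is estimated from the same data, so the test is only approximate).

# +
from scipy import stats

p_f = ft_power[i_freq]                                   # power over time at one frequency
ks = stats.kstest(p_f, 'expon', args=(0, p_f.mean()))    # exponential null with loc=0, scale=<P>
print('KS statistic = %.3f, p-value = %.3f' % (ks.statistic, ks.pvalue))
# -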
We can set up a series of statistical tests to compare data against the hypothesized parametric null distributions, or even use simulated white (or colored) noise signals as a way to generate distributions (as we did above) for non-parametric tests. # # ### for single frequencies: # 1. compute summary statistics - e.g., spectral power CV, skew. # 2. fit parametric distributions and test against null hypotheses stated in Criterion 1. Failure indicates significant deviations from noise. # 3. optimized fitting of parametric distributions to subset of the data to maximize data likelihood (or other metric, e.g., KS test), and detect out-of-distribution samples as events (outlier detection). Can then label each data point with a probabilistic oscillation score. # 4. characterize temporal structure of spectral coefficients (e.g., power autocorrelation, phase advancement) # # ### across multiple frequencies: # # 5. detect correlation structure across frequencies (e.g., percentile spectrum, neighbor phase coupling, spectral PCA a la Kai Miller) # 6. spectral power histograms # 7. iteratively fit parametric distributions at each frequency and the power spectrum via FOOOF (Bayesian FOOOF) to separate aperiodic (and specifically, stochastic) background from structured events # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from cognipy.ontology import Ontology #the ontology processing class from cognipy.ontology import CQL #SPARQL format tailored for Contolled Natural Language from cognipy.ontology import encode_string_for_graph_label #complex datatypes encoder for the graph labels in graph visualisation import textwrap def graph_attribute_formatter(val): if isinstance(val,list) or isinstance(val,set): return " | ".join(list(map(lambda i:encode_string_for_graph_label(graph_attribute_formatter(i)),val))) elif isinstance(val,dict): return " | ".join(list(map(lambda i:i[0]+" : "+encode_string_for_graph_label(graph_attribute_formatter(i[1])),val.items()))) else: return encode_string_for_graph_label(textwrap.fill(str(val),40)) # - content = """ If X connects-to something that connects-to Y then Y follows X. 
""" onto=Ontology("cnl/string",content, evaluator = lambda e:eval(e,globals(),locals()), graph_attribute_formatter = graph_attribute_formatter) onto.as_cnl() # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import sys print(f'Python version \t: {sys.version:.>{15}}') current_dir_path = os.path.dirname(os.path.realpath(os.getcwd())) print(f'Current working dir\t: {current_dir_path:.>{15}}') # ### reading an utf-8 text file in a list # + book_fr_path = './data/pg17489.txt' with open(book_fr_path,mode='r',encoding='utf-8') as f: all_book = f.readlines() num_lines_all_book = len(all_book) print(f'Number of lines of all the book : {num_lines_all_book}') # - # ### finding relevant text to extract indices # + import re indices = [i for i, elem in enumerate(all_book) if re.match(r'\*\*\* (START|END) OF THIS PROJECT GUTENBERG EBOOK', elem)] print(f'found this lines at index {indices}') ['[{:5}] {}'.format(i,all_book[i]) for i in indices] # - # ### let's get the real book content and get rid of the end of line char real_book = [i.rstrip() for i in all_book[indices[0]+1:indices[1]-1]] # let's show 10 first lines real_book[:10] # and the 10 last lines real_book[-10:] # let's adjust a litle bit to get rid of the project gutenberg information # that's not part of the original V.Hugo book real_book = real_book[5:-5] num_lines_real_book = len(real_book) print(f'Number of lines of all the real book : {num_lines_real_book}') real_book[:10] real_book[-10:] # ### let's extract the Table of Contents indice_table_content = [i for i, elem in enumerate(real_book) if re.match(r'TABLE DES MATIÈRES', elem)][0] print('[{:5}] {}'.format(indice_table_content,real_book[indice_table_content])) # ### we will first find the sections indices of the entire book indices_sections = [i for i, elem in enumerate(real_book) if re.match(r'^Livre (.+)\-\-(.+)$',elem)] print(f'found this Section information at index {indices_sections}') ['[{:5}] {}'.format(i,real_book[i]) for i in indices_sections] # ### we can use the above list indices_sections to extract the first section section1 = real_book[117:2708] print(len(section1)) section1[-10:] # ### let's find the distinct chapters indices in this first section indices_chapitres = [i for i, elem in enumerate(section1) if re.match(r'^Chapitre\s([IVX]+)$',elem)] print(f'found the chapters in this first section at index {indices_sections}') ['[{:5}] {}'.format(i,section1[i]) for i in indices_chapitres] # ### now let's grab the first chapter of the first section section1_chapter_01 = section1[5+1:122-1] section1_chapter_01 # ## Time to generalisation... 
# now it's time to generalize above, and to make it reusable for future exploration of nlp # + def get_lines_from_file(file_path, file_encoding='utf-8'): """reads a text file at file_path, with specified file_encoding and returns a list of the lines (array of lines) found in this files""" with open(file_path,mode='r',encoding=file_encoding) as f: return f.readlines() def get_indices_for_regex(lines_array, regex_expression): """ returns a list of the indices in the line_array list of strings, that match the given regex_expression """ return [i for i, elem in enumerate(lines_array) if re.match(regex_expression, elem)] def print_indices_text(lines_array, the_indexes, message=''): """will print the lines from the given lines_array at all the_indexes parameter """ print(f'found this {message} at indexes {the_indexes}') print("\n".join(['[{:5}] {}'.format(i,lines_array[i]) for i in the_indexes])) def get_real_book_from_gutenberg_file(gutenberg_file_path, file_encoding='utf-8', offset=1): a_book = get_lines_from_file(gutenberg_file_path) gutenberg_indices = get_indices_for_regex(a_book,r'\*\*\* (START|END) OF THIS PROJECT GUTENBERG EBOOK') if len(gutenberg_indices) == 2: my_real_book = [i.rstrip() for i in a_book[gutenberg_indices[0]+1:gutenberg_indices[1]-1]] return my_real_book[offset:-offset] def print_head_and_tail(lines_array, offset=10): print("### 10 FIRST LINES : ###\n{}\n########################".format("\n".join(lines_array[:offset]))) print("### 10 LAST LINES : ###\n{}\n########################".format("\n".join(lines_array[-offset:]))) # - {elem: for i, elem in enumerate(real_book) if re.match(r'^Livre (.+)\-\-(.+)$', elem)} # + #### let's verify that all the above functions are working as expected my_big_book = get_lines_from_file(book_fr_path) num_lines_my_big_book = len(my_big_book) if num_lines_all_book != num_lines_my_big_book : print(f""" WARNING[Problem in function get_lines_from_file] : in the original books ther is \t {num_lines_all_book} lines, with get_lines_from_files i get\t {num_lines_my_big_book} lines """) else: print(f'OK : found {num_lines_my_big_book} lines in the file {book_fr_path}') my_indices = get_indices_for_regex(my_big_book,r'\*\*\* (START|END) OF THIS PROJECT GUTENBERG EBOOK') if my_indices != indices: print(f""" WARNING[Problem in function get_indices_for_regex] : in the original books the indices where \t {indices} , with get_indices_for_regex i get\t {my_indices} instead ! Check the function ! """) else: print_indices_text(my_big_book, my_indices) my_gutenberg_book = get_real_book_from_gutenberg_file(book_fr_path, file_encoding='utf-8', offset=5) num_lines_my_gutenberg_book = len(my_gutenberg_book) if num_lines_real_book != num_lines_my_gutenberg_book: print(f""" WARNING[Problem in function my_gutenberg_book] : in the original 'real' book there is \t {num_lines_real_book} lines, with get_lines_from_files i get\t {num_lines_my_gutenberg_book} lines instead ! Check the function. 
""") else: print(f'OK : found {num_lines_real_book} real lines in the gutenberg file {book_fr_path}') print("### 10 FIRST LINES : ###\n{}".format("\n".join(my_gutenberg_book[:10]))) print("### 10 LAST LINES : ###\n{}".format("\n".join(my_gutenberg_book[-10:]))) # - my_sections_indices = get_indices_for_regex(my_gutenberg_book, r'^Livre (.+)\-\-(.+)$') print(f'found this Section information at index {my_sections_indices}') ['my_gutenberg_book[{:5}]= {}'.format(i,my_gutenberg_book[i]) for i in my_sections_indices] # let's keep only the book lines of the real sections my_gutenberg_book = my_gutenberg_book[117:] print_head_and_tail(my_gutenberg_book) my_sections_indices = get_indices_for_regex(my_gutenberg_book, r'^Livre (.+)\-\-(.+)$') print(f'found this Section information at index {my_sections_indices}') for i,v in enumerate(my_sections_indices): print(f'{i:<{5}}:my_gutenberg_book[{v:>{5}}] = {my_gutenberg_book[v]}') ['my_gutenberg_book[{:5}] = {}'.format(i,my_gutenberg_book[i]) for i in my_sections_indices] # + my_sections = {} for i,v in enumerate(my_sections_indices): print(f'{i:<{5}}:{v:>{15}}, {my_gutenberg_book[v]}') key = f'{i+1:>{2}}) {my_gutenberg_book[v]}' print(key, i < (len(my_sections_indices)-1)) if i < (len(my_sections_indices)-1): my_sections[key]=my_gutenberg_book[v:my_sections_indices[i+1]] else: my_sections[key]=my_gutenberg_book[v:] my_sections.keys() # - print_head_and_tail(my_sections[' 1) Livre premier--Un juste']) print_head_and_tail(section1) section1 == my_sections[' 1) Livre premier--Un juste'] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="kPDd_M416jtg" import multiprocessing as mp from string import ascii_uppercase import random from sys import stdout # + [markdown] id="mg3ZIdy234VM" # **OBJETIVO:** # # El objetivo de esta tarea consiste en poner en práctica los conceptos de cómputo concurrente vistos en clase. Se utilizarán procesos por medio del módulo `multiprocessing` del lenguaje de programación Python. # + [markdown] id="Bl-5iBIj7vqO" # ## Programa 1 # + [markdown] id="9WdTV3R32cRb" # 1. Realiza el **Programa1** que instancie, como clase o con el método `Process`, 10 procesos. Cada uno de los procesos hijos recibirán un valor entero, y un caracter $(i,c)$ enviados por el proceso padre, los procesos escribirán en la salida estándar $i$ veces el caracter $c$. # + id="ApuSZo876lqd" def imprimir_mensaje(i, c): for _ in range(i): stdout.write(c) # + colab={"base_uri": "https://localhost:8080/"} id="xmbH7_fw7Ik8" outputId="eabfc7d9-abe5-4ee2-deca-9cf0e5e0c130" num_procesos = 10 for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c)) proceso.start() # + [markdown] id="R4Kw4LBE7zb4" # ## Programa 2 # + [markdown] id="3UYjogKy2rot" # 2. Refactoriza (reescribe) el programa anterior y elabora el **Programa2** que incluya un mecanismo de sincronización el cual permita escribir en orden todos los caracteres de cada proceso. Es decir, que se obtenga la secuencia $c_{1,1},\ldots,c_{i,1},c_{2,1},\ldots,c_{2,i},\ldots,c_{10,1},\ldots,c_{10,i}$, donde cada subsecuencia $c_{k,i}$ para cada $k=1,2,\ldots,10$ es la secuencia de caracteres del proceso hijo $k$ con longitud $i$. 
# + id="0RywVeBI95-s" mutex = mp.Lock() # + id="W92sZPrZ7yxK" def imprimir_mensaje(i, c): mutex.acquire() for _ in range(i): stdout.write(c) mutex.release() # + colab={"base_uri": "https://localhost:8080/"} id="tlR5CNJN7T2g" outputId="c8be21bb-0326-4884-e493-b1b186e2604f" num_procesos = 10 for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c)) proceso.start() # + [markdown] id="y-IvLmbp_DgI" # ## Programa 3 # + [markdown] id="bl8NPOIT3Teg" # Refactoriza (reescribe) el Programa2 y elabora el **Programa3** donde construyas un mecanismo de sincronización el cual permita escribir en orden todos los caracteres de cada proceso siguiendo una política de orden $p$ que será una lista de números enteros aleatorios con los índices $k$ de cada proceso hijo. La esccritura de los caracteres seguirá la secuencia de $c_{k,i}(p)$ donde cada secuencia $c_{k,i}$ estará definida por la política $p$. # + id="wJYEUA7ayL3s" def imprimir_mensaje(i, c, turno): turn = q.get() while True: mutex.acquire() if (turno.value == turn): # Si es el turno adecuado stdout.write("El turno de " + str(c) + " es " + str(turn) + ". Se imprimirá " + str(i) + " veces\n") for _ in range(i): stdout.write(c) stdout.write("\n") turno.value = turno.value + 1 mutex.release() break mutex.release() # + colab={"base_uri": "https://localhost:8080/"} id="LLaGJmwT3vu1" outputId="e8cab3a4-4971-477a-c256-029738923863" mutex = mp.Lock() # Politica de orden p = list(range(10)) random.shuffle(p) print('La politica de orden es: ', p) q = mp.Queue() for num in p: q.put(num) num_procesos = 10 turno = mp.Value('i', 0) # Definimos un valor entero para llevar los turnos procesos = [None]*num_procesos for i in range(num_procesos): c = ascii_uppercase[i] it = random.randint(3, 6) proceso = mp.Process(target=imprimir_mensaje, args=(it, c, turno)) procesos[i] = proceso for p in procesos: p.start() # + id="hriWpLWe4HMf" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="ovpZyIhNIgoq" # ___ # # # ___ # # Text Generation with Neural Networks # # In this notebook we will create a network that can generate text, here we show it being done character by character. Very awesome write up on this here: http://karpathy.github.io/2015/05/21/rnn-effectiveness/ # # We organized the process into "steps" so you can easily follow along with your own data sets. # - import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf # + # IGNORE THE CONTENT OF THIS CELL # tf.compat.v1.disable_eager_execution() # - # ## Step 1: The Data # # You can grab any free text you want from here: https://www.gutenberg.org/ # # We'll choose all of shakespeare's works (which we have already downloaded for you), mainly for two reasons: # # 1. Its a large corpus of text, its usually recommended you have at least a source of 1 million characters total to get realistic text generation. # # 2. It has a very distinctive style. Since the text data uses old style english and is formatted in the style of a stage play, it will be very obvious to us if the model is able to reproduce similar results. 
# + colab={} colab_type="code" id="pD_55cOxLkAb" path_to_file = 'shakespeare.txt' # + colab={} colab_type="code" id="aavnuByVymwK" text = open(path_to_file, 'r').read() # + colab={} colab_type="code" id="Duhg9NrUymwO" print(text[:500]) # - # ### Understanding unique characters # + colab={} colab_type="code" id="IlCgQBRVymwR" # The unique characters in the file vocab = sorted(set(text)) print(vocab) len(vocab) # + [markdown] colab_type="text" id="rNnrKn_lL-IJ" # ## Step 2: Text Processing # + [markdown] colab_type="text" id="LFjSVAlWzf-N" # ### Text Vectorization # # We know a neural network can't take in the raw string data, we need to assign numbers to each character. Let's create two dictionaries that can go from numeric index to character and character to numeric index. # + colab={} colab_type="code" id="IalZLbvOzf-F" char_to_ind = {u:i for i, u in enumerate(vocab)} # - char_to_ind # + colab={} colab_type="code" id="IalZLbvOzf-F" ind_to_char = np.array(vocab) # - ind_to_char # + colab={} colab_type="code" id="IalZLbvOzf-F" encoded_text = np.array([char_to_ind[c] for c in text]) # - encoded_text # + [markdown] colab_type="text" id="tZfqhkYCymwX" # We now have a mapping we can use to go back and forth from characters to numerics. # - sample = text[:20] sample encoded_text[:20] # + [markdown] colab_type="text" id="bbmsf23Bymwe" # ## Step 3: Creating Batches # # Overall what we are trying to achieve is to have the model predict the next highest probability character given a historical sequence of characters. Its up to us (the user) to choose how long that historic sequence. Too short a sequence and we don't have enough information (e.g. given the letter "a" , what is the next character) , too long a sequence and training will take too long and most likely overfit to sequence characters that are irrelevant to characters farther out. While there is no correct sequence length choice, you should consider the text itself, how long normal phrases are in it, and a reasonable idea of what characters/words are relevant to each other. # - print(text[:500]) line = "From fairest creatures we desire increase" len(line) part_stanza = """From fairest creatures we desire increase, That thereby beauty's rose might never die, But as the riper should by time decease,""" len(part_stanza) # + [markdown] colab_type="text" id="hgsVvVxnymwf" # ### Training Sequences # # The actual text data will be the text sequence shifted one character forward. For example: # # Sequence In: "" # Sequence Out: "" # # # We can use the `tf.data.Dataset.from_tensor_slices` function to convert a text vector into a stream of character indices. # + colab={} colab_type="code" id="0UHJDA39zf-O" seq_len = 120 # - total_num_seq = len(text)//(seq_len+1) total_num_seq # + colab={} colab_type="code" id="0UHJDA39zf-O" # Create Training Sequences char_dataset = tf.data.Dataset.from_tensor_slices(encoded_text) for i in char_dataset.take(500): print(ind_to_char[i.numpy()]) # + [markdown] colab_type="text" id="-ZSYAcQV8OGP" # The **batch** method converts these individual character calls into sequences we can feed in as a batch. We use seq_len+1 because of zero indexing. Here is what drop_remainder means: # # drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing # whether the last batch should be dropped in the case it has fewer than # `batch_size` elements; the default behavior is not to drop the smaller # batch. 
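# + [markdown]
# A toy illustration of `drop_remainder` (not part of the original walkthrough): batching ten integers
# into groups of three keeps the three full batches and silently drops the leftover element.

# +
toy = tf.data.Dataset.range(10).batch(3, drop_remainder=True)
print([batch.numpy().tolist() for batch in toy])   # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# -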
# # + colab={} colab_type="code" id="l4hkDU3i7ozi" sequences = char_dataset.batch(seq_len+1, drop_remainder=True) # + [markdown] colab_type="text" id="UbLcIPBj_mWZ" # Now that we have our sequences, we will perform the following steps for each one to create our target text sequences: # # 1. Grab the input text sequence # 2. Assign the target text sequence as the input text sequence shifted by one step forward # 3. Group them together as a tuple # + colab={} colab_type="code" id="9NGu-FkO_kYU" def create_seq_targets(seq): input_txt = seq[:-1] target_txt = seq[1:] return input_txt, target_txt # + colab={} colab_type="code" id="9NGu-FkO_kYU" dataset = sequences.map(create_seq_targets) # - for input_txt, target_txt in dataset.take(1): print(input_txt.numpy()) print(''.join(ind_to_char[input_txt.numpy()])) print('\n') print(target_txt.numpy()) # There is an extra whitespace! print(''.join(ind_to_char[target_txt.numpy()])) # + [markdown] colab_type="text" id="MJdfPmdqzf-R" # ### Generating training batches # # Now that we have the actual sequences, we will create the batches, we want to shuffle these sequences into a random order, so the model doesn't overfit to any section of the text, but can instead generate characters given any seed text. # + colab={} colab_type="code" id="p2pGotuNzf-S" # Batch size batch_size = 128 # Buffer size to shuffle the dataset so it doesn't attempt to shuffle # the entire sequence in memory. Instead, it maintains a buffer in which it shuffles elements buffer_size = 10000 dataset = dataset.shuffle(buffer_size).batch(batch_size, drop_remainder=True) # + colab={} colab_type="code" id="p2pGotuNzf-S" dataset # + [markdown] colab_type="text" id="r6oUuElIMgVx" # ## Step 4: Creating the Model # + [markdown] colab_type="text" id="m8gPwEjRzf-Z" # We will use an LSTM based model with a few extra features, including an embedding layer to start off with and **two** LSTM layers. We based this model architecture off the [DeepMoji](https://deepmoji.mit.edu/) and the original source code can be found [here](https://github.com/bfelbo/DeepMoji). # # The embedding layer will serve as the input layer, which essentially creates a lookup table that maps the numbers indices of each character to a vector with "embedding dim" number of dimensions. As you can imagine, the larger this embedding size, the more complex the training. This is similar to the idea behind word2vec, where words are mapped to some n-dimensional space. Embedding before feeding straight into the LSTM usually leads to more realisitic results. # + colab={} colab_type="code" id="zHT8cLh7EAsg" # Length of the vocabulary in chars vocab_size = len(vocab) # The embedding dimension embed_dim = 256 # Number of RNN units rnn_neurons = 256 # - # Now let's create a function that easily adapts to different variables as shown above. 
from tensorflow.keras.models import Sequential from tensorflow.keras.layers import LSTM,Dense,Embedding,Dropout # + colab={} colab_type="code" id="MtCrdfzEI2N0" def create_model(vocab_size, embed_dim, rnn_neurons, batch_size): model = Sequential() model.add(Embedding(vocab_size,embed_dim,batch_input_shape=[batch_size,None])) model.add(LSTM(rnn_neurons,return_sequences=True,stateful=True)) model.add(Dropout(0.2)) model.add(LSTM(rnn_neurons,return_sequences=True,stateful=True)) model.add(Dropout(0.2)) model.compile(optimizer='adam', loss="sparse_categorical_crossentropy") return model # + colab={} colab_type="code" id="wwsrpOik5zhv" model = create_model( vocab_size = vocab_size, embed_dim=embed_dim, rnn_neurons=rnn_neurons, batch_size=batch_size) # - model.summary() # + [markdown] colab_type="text" id="LJL0Q0YPY6Ee" # ## Step 5: Training the model # # This will take way too long on just CPU, go use Google Collab for GPU. # - epochs = 10 # + # model.fit(dataset,epochs=epochs) # + [markdown] colab_type="text" id="kKkD5M6eoSiN" # ## Step 6: Generating text # # We need to create a loop very similar to how we generated forecasts with RNN with Time Series data. Let's take a look: # + colab={} colab_type="code" id="WvuwZBX5Ogfd" def generate_text(model, start_string): # Evaluation step (generating text using the learned model) # Number of characters to generate num_generate = 1000 # Converting our start string to numbers (vectorizing) input_eval = [char2idx[s] for s in start_string] input_eval = tf.expand_dims(input_eval, 0) # Empty string to store our results text_generated = [] # Low temperatures results in more predictable text. # Higher temperatures results in more surprising text. # Experiment to find the best setting. temperature = 1.0 # Here batch size == 1 model.reset_states() for i in range(num_generate): predictions = model(input_eval) # remove the batch dimension predictions = tf.squeeze(predictions, 0) # using a categorical distribution to predict the word returned by the model predictions = predictions / temperature predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy() # We pass the predicted word as the next input to the model # along with the previous hidden state input_eval = tf.expand_dims([predicted_id], 0) text_generated.append(idx2char[predicted_id]) return (start_string + ''.join(text_generated)) # + colab={} colab_type="code" id="ktovv0RFhrkn" print(generate_text(model, start_string=u"ROMEO: ")) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project: Predicting financial data # ## Part 2 - Mining media content # Author:
    # Date: 17.03.2019
    # Source: http://joerivanwijngaarden.com/project-financial-predictions-p1.html
    # Github: https://github.com/tingidev/jvw-projects/tree/master/project-2-predicting-financial-data

# ## News items variables
#
# 1. keywords - NLTK extracted keywords of text body
# 2. char_n - Number of characters in text body
# 3. title - Article's title
# 4. url - Article's url
# 5. text - Full article's text
# 6. brand - Newspaper brand where article was published
# 7. date - Date
# 8. date_str - Date as string
# 9. summary - NLTK extracted summary of text body
    # # + # Import friends import os, sys import numpy as np import pandas as pd import pickle import matplotlib.pyplot as plt import bson from matplotlib.ticker import FormatStrFormatter from matplotlib.font_manager import FontProperties from matplotlib.cm import ScalarMappable # %matplotlib inline # Plotting import ast from collections import Counter from wordcloud import WordCloud # Text processing import re import nltk from nltk.corpus import stopwords from nltk.corpus import wordnet from nltk.tokenize import word_tokenize, sent_tokenize from nltk.stem import PorterStemmer from nltk.stem import WordNetLemmatizer import string # - # ## Functions # + # Wordcloud def generate_wordcloud(tup): wordcloud = WordCloud(background_color='white', max_words=30, max_font_size=40, random_state=42 ).generate(str(tup)) return wordcloud # News categories def get_categories(text, brand): splt = text.split('/') if brand == 'nytimes': # nytimes has date before category section if len(splt) > 6: cat = splt[6] else: cat = '' else: cat = splt[3] return cat # Tokenize stop_words = set(stopwords.words('english')) def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return wordnet.ADJ elif treebank_tag.startswith('S'): return wordnet.ADJ_SAT elif treebank_tag.startswith('V'): return wordnet.VERB elif treebank_tag.startswith('N'): return wordnet.NOUN elif treebank_tag.startswith('R'): return wordnet.ADV else: return '' def tokenize(text): ''' Tokenize article text into a list of useful words. Excludes any stopwords, symbols and punctuation. Lemmatize each word to its stem. ''' ps = PorterStemmer() lm = WordNetLemmatizer() try: regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]') text = regex.sub(" ",text) tokens_ = word_tokenize(text) pos_tag = nltk.pos_tag(tokens_) tokens = [] for w in pos_tag: if w[0].lower() not in stop_words and re.search('[a-zA-Z]', w[0]) and len(w[0]) >= 3: pos_val = get_wordnet_pos(w[1]) if pos_val: lemm = lm.lemmatize(w[0].lower(),pos_val) tokens.append(lemm) return tokens except TypeError as e: print(text, e) # - # ## Load data # Loading news BSON files base_folder = 'C:/Users/jvw/Dropbox/data_dumps/tingi-sandbox/news_items.bson' dump_folder = ['dump_19_01_09', 'dump_19_02_03'] newsname = base_folder + dump_folder[0] + '/news_items.bson' with open(newsname,'rb') as f: b = bson.decode_all(f.read()) news = pd.DataFrame(b) for i in range(len(dump_folder)-1): newsname = base_folder + dump_folder[i+1] + '/news_items.bson' with open(newsname,'rb') as f: b = bson.decode_all(f.read()) tmp = pd.DataFrame(b) news = news.append(tmp) print('Size:', news.shape, '\n') print(news.dtypes) news.head(3) # Clean data by removing cbsnews (only 3 entries from testing) tmp = news.loc[news['brand']=='cbsnews'] news = news.drop(tmp.index) # Extract categories from URL news['category'] = news[['url', 'brand']].apply(lambda x: get_categories(x['url'], x['brand']), axis=1) # + # News outlets included in dataset brands = np.sort(news['brand'].unique()) places = ['Japan', 'USA', 'Hong Kong', 'UK'] i = 0 tot_n = 0 for brand in brands: grouped = news.loc[news['brand']==brand].groupby('date_str') mean_n = int(np.mean(grouped['date'].size().values)) tot_n += mean_n print('Newspaper:', brand.upper(), '(' + places[i] + ') -', mean_n, 'articles') i += 1 print('Total:', tot_n, 'articles per day') # - # Days of gathered media days = news['date_str'].unique() print('Number of days:', len(days)) print('First day:', days[0]) print('Last day:', days[-1], '\n') print(days) # ## General 
overview of newspapers (unused & Fig. 6 & 7) # + col = plt.get_cmap('Set1') fig, ax = plt.subplots(figsize=(10, 6)) leg = ['Japan Times (JAP)', 'NY Times (USA)', 'South China MP (HNK)', 'The Guardian (UK)'] ticks = [0, 21, 44] i = 0 for brand in brands: grouped = news.loc[news['brand']==brand].groupby('date_str') ax.plot(grouped['date'].size().values, linewidth=3, color=col.colors[i]) ax.set_xlim(-2, len(days) + 1) ax.set_ylim(0, 950) ax.set_yticks(np.linspace(0, 900, 10)) ax.set_xticks(ticks) ax.set_xticklabels(['Dec \'18', 'Jan \'19', 'Feb \'19']) ax.set_title('Media collected over time', fontsize=18) ax.set_ylabel('Number of articles', fontsize=16) ax.tick_params(axis='x', which='major', labelsize=14) ax.tick_params(axis='y', which='major', labelsize=12) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) i += 1 fig.legend(leg, bbox_to_anchor=(.36, .72), fontsize=12) ax.grid(True) fig.tight_layout() # plt.savefig('./images/unused.png') # + # Distribution of categories across newspapers cmap = plt.get_cmap('plasma') fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 10)) fig.subplots_adjust(hspace=0.7) ttl = ['Japan Times', 'NY Times', 'South China MP', 'The Guardian'] size = 8 i = 0 for brand in brands: grouped = news.loc[news['brand']==brand] categories = grouped['category'].values.tolist() print("There are %d unique categories in " % len(set(categories)) + brand ) ticks = [x[0] for x in Counter(categories).most_common(size)] vals = [x[1] / len(categories) * 100 for x in Counter(categories).most_common(size)] col = [cmap(x / 25) for x in vals] im = fig.axes[i].bar(range(size), vals, color=col) fig.axes[i].set_xticks(range(size)) fig.axes[i].set_xticklabels(ticks, rotation=45, ha="right") fig.axes[i].set_ylim([0, 25]) fig.axes[i].set_title(ttl[i], fontsize=20) fig.axes[i].set_ylabel('Occurence (%)', fontsize=16) fig.axes[i].spines['right'].set_visible(False) fig.axes[i].spines['top'].set_visible(False) fig.axes[i].tick_params(axis='y', which='major', labelsize=12) fig.axes[i].tick_params(axis='x', which='major', labelsize=16) i += 1 # Colorbar sm = ScalarMappable(cmap=cmap) sm.set_array([0, 25]) fig.subplots_adjust(right=0.88) cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7]) cb = fig.colorbar(sm, cax=cbar_ax, ticks=np.linspace(5, 25, 5)) cb.ax.tick_params(labelsize=12) # plt.savefig('./images/fig6.png', bbox_inches='tight') # - # Unique keywords list_of_keywords = news['keywords'].values.flatten() all_keywords = [item for sublist in list_of_keywords for item in sublist] print("There are %d unique keywords across all articles." % len(set(all_keywords))) print("There are %d total keywords." 
% len(all_keywords)) most_common=Counter(all_keywords).most_common(50) most_common[:10] # Generate wordcloud for each newspaper fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 10)) i = 0 for brand in brands: list_of_keywords = news.loc[news['brand']==brand]['keywords'].values.flatten() all_keywords = [item for sublist in list_of_keywords for item in sublist] img = generate_wordcloud(all_keywords) fig.axes[i].imshow(img) fig.axes[i].axis('off') fig.axes[i].set_title(leg[i], fontsize=24) i += 1 fig.tight_layout() # plt.savefig('./images/fig7.png') # ## Tokenize & example # Tokenize summary (takes 10 mintutes or so on a laptop) news['tokens'] = news['summary'].map(tokenize) # Print examples for text, summary, tokens, keywords in zip(news['text'].head(8), news['summary'].head(8), news['tokens'].head(8), news['keywords'].head(8)): print(text) print(summary) print(tokens) print(keywords) print(' ') # ## Term frequency-inversed document frequency (TF-IDF) & example (Fig. 8) # + # Apply tf-idf function to summary data from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer n_features = 100000 tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, tokenizer=tokenize, max_features=n_features, ngram_range=(1, 1)) tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, tokenizer=tokenize, max_features=n_features, ngram_range=(1, 1)) # Run it for theguardian as an example brand = brands[1] grouped = news.loc[news['brand']==brand]['summary'].values tf = tf_vectorizer.fit_transform(list(grouped)) tfidf = tfidf_vectorizer.fit_transform(list(grouped)) # - # TF counts for corpus count = dict(zip(tf_vectorizer.get_feature_names(), tf.toarray().sum(axis=0))) tfmat = pd.DataFrame().from_dict(count, orient='index') tfmat.columns = ['Total count'] tfmat.sort_values(by='Total count', ascending=False).head(10).transpose() # TF counts for example article example = grouped[10] print(example) count_example = Counter(tokenize(example)) xxx = pd.DataFrame().from_dict(count_example, orient='index') xxx.columns = ['Example count'] xxx.sort_values(by='Example count', ascending=False).head(10).transpose() # TF-IDF for example article score = dict(zip(tfidf_vectorizer.get_feature_names(), np.around(tfidf.toarray()[10], 2))) tfidfmat = pd.DataFrame().from_dict(score, orient='index') tfidfmat.columns = ['Example tf-idf'] tfidfmat.sort_values(by='Example tf-idf', ascending=False).head(10).transpose() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np list1=[1,2,3,4] array1=np.array(list1) array1 list2=[11,22,33,44] lists=[list1,list2] lists array2=np.array(lists) array2 array2.shape array2.dtype np.zeros(5) zeros=np.zeros(5) zeros zeros.dtype ones=np.ones(5) ones ones=np.ones([4,4]) ones empty=np.empty(5) empty iden=np.eye(5) iden np.arange(5) np.arange(3,30) np.arange(3,30,4) # + ######################################################################################### # - 5.0/2 arr1=np.array([[1,2,3,4],[8,9,10,11]]) arr1 arr1*arr1 arr1*2 arr1-arr1 arr1+1 1/arr1 arr1**3 # + ################################################################################## # - arr=np.arange(12) arr arr=np.arange(0,5) arr arr[3] arr[1:4] arr[0:4] arr[0:4]=100 arr arr=np.arange(11) slice=arr[0:6] slice slice[:]=99 slice arr arr_copy=arr.copy() arr_copy slice_arr=arr_copy[0:6] slice_arr slice_arr[:]=80 slice_arr arr_copy arr 
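# Recap of the view-vs-copy behaviour just shown (illustrative cell, not in the original tutorial):
demo = np.arange(5)
view = demo[1:4]      # slicing returns a view that shares memory with demo
view[:] = 0           # so writing through the view changes demo as well
copied = demo.copy()  # .copy() allocates fresh memory
copied[:] = 9         # so this leaves demo untouched
demo, copied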
arr_2d=np.array(([5,10,15],[20,25,30],[35,40,45])) arr_2d arr_2d[1] arr_2d[0] arr_2d[0:2] arr_2d[1][0] arr_2d[:2,1:] arr_2d[:2,:1] arr2d=np.zeros((10,10)) arr2d arr_length=arr2d.shape[1] for i in range(arr_length): arr2d[i]=i arr2d arr2d[[2,6,3]] # + ####################################################################################### # - arr=np.arange(50).reshape(5,10) arr arr.T np.dot(arr.T,arr) arr3d=np.arange(50).reshape((5,5,2)) arr3d arr=np.array([[1,2,3]]) arr arr.swapaxes(0,1) # + ############################################################################################ # - arr=np.arange(11) np.sqrt(arr) np.exp(arr) A=np.random.rand(10) A B=np.random.rand(10) B np.add(A,B) np.maximum(A,B) np.minimum(A,B) # + ############################################# # - import matplotlib.pyplot as plt # %matplotlib inline points = np.arange(-5,5,0.01) dx,dy=np.meshgrid(points,points) dx dy z=(np.sin(dx)+np.sin(dy)) z plt.imshow(z) plt.imshow(z) plt.colorbar plt.title("sinx + siny") # + A=np.array([1,2,3,4]) B=np.array([100,200,300,400]) # - C=np.array([True,True,False,False]) ans=[(A_val if cond else B_val) for A_val,B_val,cond in zip(A,B,C)] ans ans2=np.where(C,A,B) ans2 arr= np.random.rand(5,5) arr np.where(arr<0.5,0,arr) arr=np.array([[1,2,3],[4,5,6],[7,8,9]]) arr arr.sum() arr.sum(1) arr.mean(1) arr.mean() arr.std() arr.var() bool_arr=np.array([True,False,True]) bool_arr bool_arr.any() bool_arr.all() arr=np.random.rand(5) arr arr.sort() arr countries=(['USA','USA','FRANCE','GERMANY']) np.unique(countries) np.in1d(['FRANCE','USA','INDIA'],countries) # + #################################################################################### # - arr=np.arange(5) np.save('my array',arr) arr=np.arange(10) arr np.load('my array.npy') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Simple Demo of KubeVaspInteractive # This tutorial shows how to setup pod deployment with same FileSystem mounting to be used for KubeVaspInteractive # # First, check if volume mounting is enabled in current pod # + language="bash" # kubectl get pod $HOSTNAME -o yaml | grep "mountPath" -2 # - # Let's use `/home/jovyan/data` as the shared volume mount between local and remote pods. # ### Create pods with similar specs as local pod # `vasp_interactive.kubernetes` provides several helper functions to deploy a "similar pod" from vasp_interactive.kubernetes import KubeVaspInteractive, create_kube_pods # `create_kube_pods` parses the current pod specs and generate a KubeCluster for scheduling and scaling (can also be achieved via native scalable deployment!) # # It takes the resoures and waits for pods to be ready. # # Let's first deploy 2 pods. Check the status of deployment at: # # https://laikapack-controller.cheme.cmu.edu/p/c-qc7lr:p-cl5h6/workloads cluster, worker_pods = create_kube_pods(scale=2, cpu=8, memory="4Gi") worker_pods # ### Run isolated VASP process # `KubeVaspInteractive` just need to take the name and namespace for the pod to inject `kubectl exec` commands. 
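# + [markdown]
# The general idea (a simplified sketch, not the library's actual implementation; the pod and namespace
# arguments below are placeholders): the local calculator wraps the command it would normally launch
# inside `kubectl exec`, so the process runs in the remote pod while stdin/stdout stay attached to the
# local driver.

# +
import subprocess

def run_in_pod(pod, namespace, command):
    # Start `command` inside `pod`; -i keeps stdin open so text can be streamed both ways.
    return subprocess.Popen(
        ["kubectl", "exec", "-i", pod, "-n", namespace, "--"] + list(command),
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
    )
# -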
# %mkdir -p /home/jovyan/data/kube-vpi-test # %rm -rf /home/jovyan/data/kube-vpi-test/* from ase.build import molecule mol = molecule("CH4", vacuum=5, pbc=True) vasp_params = dict(xc="pbe", kpts=1, encut=350, istart=0) calc = KubeVaspInteractive(directory="/home/jovyan/data/kube-vpi-test", pod=worker_pods[0], **vasp_params) # `calc._args` are the arguments the calculator uses for communication. It essentially `kubectl exec` into the pod, change the directory and run VASP there calc._args # Let's use classic mode to see where VASP is running mol.calc = calc mol.get_potential_energy() # Now run `top` command in both local terminal and pod `dask-jovyan-c6f5a345-66c2ds` # One advantage of process isolation is killing the pods also releases any processes associated with them. cluster.close() # The communication is down calc.process.poll() # Note currently after pod is deleted / stopped, you need to create the calculator again for further calculations. # ### Simple pod synchronization # One advantage of process isolation is that running calculations in parallel is feasible. # To do this, we need to use some sort of concurrency for the processes on local pod. One possibility is to use threading # + from pathlib import Path import time mol1 = molecule("CH4", vacuum=5, pbc=True) mol2 = mol1.copy() mol2.rattle(stdev=0.1) vasp_params = dict(xc="pbe", kpts=1, encut=350, istart=0) root = Path("/home/jovyan/data/scratch") # + cluster, worker_pods = create_kube_pods(scale=2, cpu=8, memory="2Gi") image = cluster.workers[0]._pod.spec.containers[0].image # calculation part calc1 = KubeVaspInteractive( directory=root / "kube-vpi-test1", pod=worker_pods[0], **vasp_params ) calc2 = KubeVaspInteractive( directory=root / "kube-vpi-test2", pod=worker_pods[1], **vasp_params ) # - # Following part is running serial code # Serial with calc1, calc2: mol1.calc = calc1 mol2.calc = calc2 t_ = time.perf_counter() e1 = mol1.get_potential_energy() e2 = mol2.get_potential_energy() print("Serial mode:") print(e1, e2) print(f"Walltime for 2 sp calculations: {time.perf_counter() - t_}") # Let's now use threading to contain the processes. Note the function to be passed to threading must have mutable objects to store data. # + from threading import Thread def _thread_calculate(atoms, energy): """A threaded version of atoms.get_potential_energy. Energy is a one-member list ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB """ energy[0] = atoms.get_potential_energy() return # Pseudo-parallel with calc1, calc2: mol1.calc = calc1 mol2.calc = calc2 # need to use mutable object to store energy e1 = [999] e2 = [999] threads = [ Thread(target=_thread_calculate, args=(mol1, e1)), Thread(target=_thread_calculate, args=(mol2, e2)), ] t_ = time.perf_counter() for th in threads: th.start() for th in threads: th.join() print("Threaded mode:") print(e1[0], e2[0]) print(f"Walltime for 2 sp calculations: {time.perf_counter() - t_}") # - # As can be seen, the wall time reduced to almost half compared with the serial code, indicating the kubernetes isolation is scalable. 
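# As a hedged aside (not from the original demo): the same pseudo-parallel pattern can be written with `concurrent.futures`, whose futures return values directly and so avoid the mutable-list workaround above. This is only a sketch; it assumes freshly created, live `calc1`/`calc2` calculators and the same `mol1`/`mol2` structures as above.
# +
from concurrent.futures import ThreadPoolExecutor

# Sketch only: assumes calc1/calc2 are live calculators, as set up earlier.
with calc1, calc2:
    mol1.calc = calc1
    mol2.calc = calc2
    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(m.get_potential_energy) for m in (mol1, mol2)]
        e1_alt, e2_alt = (f.result() for f in futures)
print(e1_alt, e2_alt)
# -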
cluster.close() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # cuDF Cheat Sheets sample code # # (c) 2020 NVIDIA, Blazing SQL # # Distributed under Apache License 2.0 # ### Imports import cudf import numpy as np # ### Sample DataFrame df = cudf.DataFrame( [ (39, 6.88, np.datetime64('2020-10-08T12:12:01'), np.timedelta64(14378,'s'), 'C', 'D', 'data' , 'RAPIDS.ai is a suite of open-source libraries that allow you to run your end to end data science and analytics pipelines on GPUs.') , (11, 4.21, None, None , 'A', 'D', 'cuDF' , 'cuDF is a Python GPU DataFrame (built on the Apache Arrow columnar memory format)') , (31, 4.71, np.datetime64('2020-10-10T09:26:43'), np.timedelta64(12909,'s'), 'U', 'D', 'memory' , 'cuDF allows for loading, joining, aggregating, filtering, and otherwise manipulating tabular data using a DataFrame style API.') , (40, 0.93, np.datetime64('2020-10-11T17:10:00'), np.timedelta64(10466,'s'), 'P', 'B', 'tabular' , '''If your workflow is fast enough on a single GPU or your data comfortably fits in memory on a single GPU, you would want to use cuDF.''') , (33, 9.26, np.datetime64('2020-10-15T10:58:02'), np.timedelta64(35558,'s'), 'O', 'D', 'parallel' , '''If you want to distribute your workflow across multiple GPUs or have more data than you can fit in memory on a single GPU you would want to use Dask-cuDF''') , (42, 4.21, np.datetime64('2020-10-01T10:02:23'), np.timedelta64(20480,'s'), 'U', 'C', 'GPUs' , 'BlazingSQL provides a high-performance distributed SQL engine in Python') , (36, 3.01, np.datetime64('2020-09-30T14:36:26'), np.timedelta64(24409,'s'), 'T', 'D', None , 'BlazingSQL is built on the RAPIDS GPU data science ecosystem') , (38, 6.44, np.datetime64('2020-10-10T08:34:36'), np.timedelta64(90171,'s'), 'X', 'B', 'csv' , 'BlazingSQL lets you ETL raw data directly into GPU memory as a GPU DataFrame (GDF)') , (17, 5.28, np.datetime64('2020-10-09T08:34:40'), np.timedelta64(30532,'s'), 'P', 'D', 'dataframes' , 'Dask is a flexible library for parallel computing in Python') , (10, 8.28, np.datetime64('2020-10-03T03:31:21'), np.timedelta64(23552,'s'), 'W', 'B', 'python' , None) ] , columns = ['num', 'float', 'datetime', 'timedelta', 'char', 'category', 'word', 'string'] ) df['category'] = df['category'].astype('category') # --- # # # Transforming # # --- # #### cudf.core.dataframe.DataFrame.apply_rows() # + def regression(a, b, output, A_coeff, B_coeff, constant): for i, (aa, bb) in enumerate(zip(a,b)): output[i] = A_coeff * aa + B_coeff * bb + constant df.apply_rows( regression , incols = {'num': 'a', 'float': 'b'} , outcols = {'output': np.float64} , kwargs = {'A_coeff': 0.21, 'B_coeff': -2.82, 'constant': 3.43} ) # - # #### cudf.core.dataframe.DataFrame.drop() df.drop(1) df.drop(range(1,7)) df.drop('word', axis=1) df.drop(['word', 'category'], axis=1) # #### cudf.core.dataframe.DataFrame.dropna() df.dropna() df.dropna(subset=['datetime']) df.dropna(how='all') df.dropna(axis=1) # #### cudf.core.dataframe.DataFrame.fillna() df.fillna({'num': 1}) # #### cudf.core.dataframe.DataFrame.join() # + categories = cudf.DataFrame([ ('B', 'cuDF') , ('C', 'BlazingSQL') , ('D', 'Dask') ], columns=['cat', 'name']) df.join(categories) # - df.join(categories, lsuffix='_l', rsuffix='_r', how='left') # #### cudf.core.dataframe.DataFrame.merge() categories = cudf.DataFrame([ ('B', 'cuDF') , ('C', 'BlazingSQL') , 
('D', 'Dask') ], columns=['category', 'name']) df.merge(categories, on='category') df.merge(categories, on='category', how='left') # #### cudf.core.dataframe.DataFrame.rename() df.rename({0: 100}) df.rename({'num': 'numbers'}, axis=1) # #### cudf.core.dataframe.DataFrame.reset_index() df.reset_index() df.reset_index(drop=True) # #### cudf.core.dataframe.DataFrame.set_index() df.set_index('category') df.set_index('category', drop=False) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by ; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).* # # *The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* # # ## Simple Plots in Matplotlib # # Matplotlib is a multi-platform data visualization library built on NumPy arrays. # It was conceived by in 2002, originally as a patch to IPython for enabling interactive MATLAB-style plotting via gnuplot from the IPython command line. # The Matplotlib package was born shortly aftwards, with version 0.1 released in 2003. # It received an early boost when it was adopted as the plotting package of choice of the Space Telescope Science Institute (the folks behind the Hubble Telescope), which financially supported Matplotlib’s development and greatly expanded its capabilities. # # One of Matplotlib’s most important features is its ability to play well with many operating systems and graphics backends. # Matplotlib supports dozens of backends and output types, which means you can count on it to work regardless of which operating system you are using or which output format you wish. # This cross-platform, everything-to-everyone approach has been one of the great strengths of Matplotlib. # It has led to a large user base, which in turn has led to an active developer base and Matplotlib’s powerful tools and ubiquity within the scientific Python world. # ### Importing Matplotlib # # Just as we use the ``np`` alias for NumPy we will use a standard shorthand for Matplotlib: import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # This notation probably looks a little unusual. We're not importing the whole of the matploblib package in this case, just the pyplot interface. Matplotlib actually provides two different interfaces, one based on MATLAB, and a second object-oriented interface. We will stick with the MATLAB style interface offered by pyplot for the purposes of this notebook. The magic function below the import tells the notebook that we want static images of any plots, rather than the interactive plots we saw in the example from the previous topic. # ### Simple Line Plots # Perhaps the simplest of all plots is the visualization of a single function $y = f(x)$. # Here we will take a first look at creating a simple plot of this type. # For all Matplotlib plots, we start by creating a figure and an axes. 
# In their simplest form, a figure and axes can be created as follows: fig = plt.figure() ax = plt.axes() # In Matplotlib, the *figure* (an instance of the class ``plt.Figure``) can be thought of as a single container that contains all the objects representing axes, graphics, text, and labels. # The *axes* (an instance of the class ``plt.Axes``) is what we see above: a bounding box with ticks and labels, which will eventually contain the plot elements that make up our visualization. # # As we're using the MATLAB style interface we technically don't need either `fig` or `ax`, they just allow us to draw the blank graph above. If we want to save our images to a file we will need `fig` later on, however we will never use `ax` in this notebook, and you could just omit it from the code above. # # If you wanted to, you could run the code below without having created the figure and axes above. The code should be quite self-explanatory, we create a numpy array of 100 linear spaced points from 0 to 10, and then plot sin(x) at those points. If you reduce the number of points from 100 you will see the graph becomes less smooth, and it you increase it from 100 you will see the graph takes much longer to render. x = np.linspace(0, 10, 100) plt.plot(x, np.sin(x)); # If we want to create a single figure with multiple lines, we can simply call the ``plot`` function multiple times: plt.plot(x, np.sin(x)) plt.plot(x, np.cos(x)); # You're not just limited to built-in NumPy functions, you can create graphs of any function you like: plt.plot(x, x**2) plt.plot(x,(x**3)/2); # We can even plot our own data if we store the x and y values in NumPy arrays: # + x = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 250]) y = np.array([0.38, 0.82, 1.27, 1.78, 2.43, 2.77, 3.44, 4.01, 4.60, 5.03, 5.58, 6.17, 6.89, 7.45, 7.83, 14.73]) plt.plot(x,y); # - # That's all there is to plotting simple functions in Matplotlib! # We'll now dive into some more details about how to control the appearance of the axes and lines. # ### Adjusting the Plot: Line Colors and Styles # The first adjustment you might wish to make to a plot is to control the line colors and styles. # The ``plt.plot()`` function takes additional arguments that can be used to specify these. # To adjust the color, you can use the ``color`` keyword, which accepts a string argument representing virtually any imaginable color. # The color can be specified in a variety of ways: plt.plot(x, np.sin(x - 0), color='blue') # specify color by name plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk) plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1 plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF) plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1 plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supported # If no color is specified, Matplotlib will automatically cycle through a set of default colors for multiple lines. 
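# A tiny illustrative aside (not in the original text): plotting a few lines without any `color` argument shows the default cycle in action -- each call picks up the next color automatically.
for offset in range(4):
    plt.plot(x, np.sin(x - offset))  # no color given -- the default cycle is used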
# # Similarly, the line style can be adjusted using the ``linestyle`` keyword: # + plt.plot(x, x + 0, linestyle='solid') plt.plot(x, x + 1, linestyle='dashed') plt.plot(x, x + 2, linestyle='dashdot') plt.plot(x, x + 3, linestyle='dotted'); # For short, you can use the following codes: plt.plot(x, x + 4, linestyle='-') # solid plt.plot(x, x + 5, linestyle='--') # dashed plt.plot(x, x + 6, linestyle='-.') # dashdot plt.plot(x, x + 7, linestyle=':'); # dotted # - # If you would like to be extremely terse, these ``linestyle`` and ``color`` codes can be combined into a single non-keyword argument to the ``plt.plot()`` function: plt.plot(x, x + 0, '-g') # solid green plt.plot(x, x + 1, '--c') # dashed cyan plt.plot(x, x + 2, '-.k') # dashdot black plt.plot(x, x + 3, ':r'); # dotted red # These single-character color codes reflect the standard abbreviations in the RGB (Red/Green/Blue) and CMYK (Cyan/Magenta/Yellow/blacK) color systems, commonly used for digital color graphics. # # There are many other keyword arguments that can be used to fine-tune the appearance of the plot; for more details, take a look at the documentation of `plot` here: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html # ### Adjusting the Plot: Axes Limits # # Matplotlib does a decent job of choosing default axes limits for your plot, but sometimes it's nice to have finer control. # The most basic way to adjust axis limits is to use the ``plt.xlim()`` and ``plt.ylim()`` methods: # + plt.plot(x, np.sin(x)) plt.xlim(-1, 11) plt.ylim(-1.5, 1.5); # - # A useful related method is ``plt.axis()``which allows you to set the ``x`` and ``y`` limits with a single call, by passing a list which specifies ``[xmin, xmax, ymin, ymax]``: plt.plot(x, np.sin(x)) plt.axis([-1, 11, -1.5, 1.5]); # The ``plt.axis()`` method goes even beyond this, allowing you to do things like automatically tighten the bounds around the current plot: plt.plot(x, np.sin(x)) plt.axis('tight'); # It allows even higher-level specifications, such as ensuring an equal aspect ratio so that on your screen, one unit in ``x`` is equal to one unit in ``y``: plt.plot(x, np.sin(x)) plt.axis('equal'); # ### Labeling Plots # # As the last piece of this subsection, we'll briefly look at the labeling of plots: titles, axis labels, and simple legends. # # Titles and axis labels are the simplest such labels—there are methods that can be used to quickly set them: plt.plot(x, np.sin(x)) plt.title("A Sine Curve") plt.xlabel("x") plt.ylabel("sin(x)"); # When multiple lines are being shown within a single axes, it can be useful to create a plot legend that labels each line type. # Again, Matplotlib has a built-in way of quickly creating such a legend, using the ``plt.legend()`` method. # Though there are several valid ways of using this, I find it easiest to specify the label of each line using the ``label`` keyword of the plot function: # + plt.plot(x, np.sin(x), '-g', label='sin(x)') plt.plot(x, np.cos(x), ':b', label='cos(x)') plt.legend(); # - # As you can see, the ``plt.legend()`` function keeps track of the line style and color, and matches these with the correct label. # ### Simple Scatter Plots # Another commonly used plot type is the simple scatter plot, a close cousin of the line plot. # Instead of points being joined by line segments, here the points are represented individually with a dot, circle, or other shape. 
If we choose to we can produce scatter plots using the same `plt.plot()` method that we used previously by providing an additional argument: # + x = np.linspace(0, 10, 30) y = np.sin(x) plt.plot(x, y, 'dk'); # - # The third argument in the function call is a character that represents the symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here (don't worry too much about the code, its just creating a load of random data points): rng = np.random.RandomState(0) for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']: plt.plot(rng.rand(5), rng.rand(5), marker, label="marker='{0}'".format(marker)) plt.legend(numpoints=1) plt.xlim(0, 1.8); # For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them: plt.plot(x, y, '--ob'); # Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers: plt.plot(x, y, '-p', color='gray', markersize=15, linewidth=10, markerfacecolor='white', markeredgecolor='gray', markeredgewidth=2) plt.ylim(-1.2, 1.2); # ### Scatter Plots with ``plt.scatter`` # # A second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function: plt.scatter(x, y, marker='o'); # The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data. # # Let's show this by creating a random scatter plot with points of many colors and sizes. # In order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level: # + rng = np.random.RandomState(0) x = rng.randn(100) y = rng.randn(100) colors = rng.rand(100) sizes = 1000 * rng.rand(100) plt.scatter(x, y, c=colors, s=sizes, alpha=0.3, cmap='viridis') plt.colorbar(); # show color scale # - # Notice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels. # In this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data. # # For example, we might use the Iris data from Scikit-Learn, where each sample is one of three types of flowers that has had the size of its petals and sepals carefully measured: # + from sklearn.datasets import load_iris iris = load_iris() features = iris.data.T fig = plt.figure() plt.scatter(features[0], features[1], alpha=0.2, s=100*features[3], c=iris.target, cmap='viridis') plt.xlabel(iris.feature_names[0]) plt.ylabel(iris.feature_names[1]); # - # We can see that this scatter plot has given us the ability to simultaneously explore four different dimensions of the data: # the (x, y) location of each point corresponds to the sepal length and width, the size of the point is related to the petal width, and the color is related to the particular species of flower. # Multicolor and multifeature scatter plots like this can be useful for both exploration and presentation of data. 
# # Notice how we also used the line `fig = plt.figure()` in this code block. This is going to allow us to save this image in just a little while. # ### ``plot`` Versus ``scatter``: A Note on Efficiency # # Aside from the different features available in ``plt.plot`` and ``plt.scatter``, why might you choose to use one over the other? While it doesn't matter as much for small amounts of data, as datasets get larger than a few thousand points, ``plt.plot`` can be noticeably more efficient than ``plt.scatter``. # The reason is that ``plt.scatter`` has the capability to render a different size and/or color for each point, so the renderer must do the extra work of constructing each point individually. # In ``plt.plot``, on the other hand, the points are always essentially clones of each other, so the work of determining the appearance of the points is done only once for the entire set of data. # For large datasets, the difference between these two can lead to vastly different performance, and for this reason, ``plt.plot`` should be preferred over ``plt.scatter`` for large datasets. # ### Saving Figures to File # # One nice feature of Matplotlib is the ability to save figures in a wide variety of formats. # Saving a figure can be done using the ``savefig()`` command. # For example, to save the previous figure as a PNG file, you can run this: fig.savefig('my_figure.png') # We now have a file called ``my_figure.png`` in the current working directory: # !ls -lh my_figure.png # To confirm that it contains what we think it contains, let's use the IPython ``Image`` object to display the contents of this file: from IPython.display import Image Image('my_figure.png') # There is signicantly more that you can do with Matplotlib, but hopefully this has given you a useful insight into how to draw simple plots. 
# If you want to learn more about using Matplotlib you can refer to Chapter 4 of the Python Data Science Handbook, and look at the many examples on the Matplotlib website: https://matplotlib.org/3.1.1/index.html
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## Exercise 1.3
#
# Data:
data <- read.table("./ex_1_3.txt", header=TRUE)
data
summary(data)
attach(data)  # so that the variables Year, Nationwide, Rural and Urban can be used directly.

# ### (1)
# Compute the mean, variance, standard deviation, coefficient of variation, skewness and kurtosis.
#
# **Mean**:
c(mean(Nationwide), mean(Rural), mean(Urban))

# Alternatively, R provides a more convenient function, `colMeans`, which computes the mean of every column of a data frame directly (`data[-1]` drops the first column, Year):
colMeans(data[-1])

# The `colMeans` function is equivalent to the following `apply` call:
apply(data[-1], MARGIN=2, FUN=mean)

# Note: `MARGIN = 2` means the function is applied column-wise.

# **Variance**:
apply(data[-1], 2, var)

# **Standard deviation**:
apply(data[-1], 2, sd)

# **Coefficient of variation**:
# +
cv <- function(x) sd(x)/mean(x)
apply(data[-1], 2, cv)
# -

# **Skewness**:
# We can use a library for this calculation:
# install.packages("psych")
library(psych)

# +
# g1 <- function(x) skew(x, type=2)  # g1 and g2 use type=2: see help(skew)
# or write the function by hand following the textbook (p. 6):
g1 <- function(x) {
  n <- length(x)
  A <- n / ((n-1) * (n-2))
  B <- 1 / sd(x)^3
  S <- sum((x - mean(x))^3)
  A * B * S
}
apply(data[-1], 2, g1)
# -

# **Kurtosis**:
# +
# g2 <- function(x) kurtosi(x, type=2)  # help(kurtosi)
g2 <- function(x) {
  n <- length(x)
  A <- (n * (n+1)) / ((n-1) * (n-2) * (n-3))
  B <- 1 / sd(x)^4
  S <- sum((x - mean(x))^4)
  C <- (3 * (n-1)^2) / ((n-2) * (n-3))
  A * B * S - C
}
apply(data[-1], 2, g2)
# -

# Note: in fact, the psych package provides a `describe` function that returns all of the common statistics at once:
describe(data[-1], type=2)

# ### (2)
#
# **Median**:
apply(data[-1], 2, median)

# **Quartiles**:
apply(data[-1], 2, quantile)

# Five-number summary:
fn <- apply(data[-1], 2, fivenum)
fn

# Interquartile range:
# +
R1 <- function(Q3, Q1) Q3 - Q1
R1(Q3=fn[4,], Q1=fn[2,])
# -

# Trimean:
# +
M3 <- function(Q1, M, Q3) Q1/4 + M/2 + Q3/4
M3(Q1=fn[2,], M=fn[3,], Q3=fn[4,])
# -

# ### (3) Histograms
histogram <- function(x, xname="x") {
  hist(x, prob=TRUE, main=paste("Histogram of", xname))
  lines(density(x))
  rug(x)  # show the actual data points
}
# layout(matrix(c(1,2,3), nr=1, byrow=T))
histogram(Nationwide, xname="Nationwide")
histogram(Rural, xname="Rural")
histogram(Urban, xname="Urban")

# ### (4) Stem-and-leaf plots
stem(Nationwide)
stem(Rural)
stem(Urban)

# ### (5) Outliers
abnormal <- function(x) {
  fn <- fivenum(x); Q1 <- fn[2]; Q3 <- fn[4]; R1 <- Q3 - Q1
  QD <- Q1 - 1.5 * R1
  QU <- Q3 + 1.5 * R1
  x[(x < QD) | (x > QU)]
}
apply(data[-1], 2, abnormal)

# The result is empty: there are no outliers.
detach(data)

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial for "Advances in machine learning for molecules"
# _Summer school for Machine Learning in Bioinformatics, Moscow_
# , August 2020
#
# This notebook has been designed to complement Miguel's lecture earlier this afternoon by demonstrating some of the concepts he discussed. We will begin by importing the associated dependencies to run this notebook; see the repo's README for details on how to get these installed. After this, we will describe how one can manipulate molecules in [RDKIT](https://rdkit.org/), a popular chemoinformatics toolkit, of which no existing knowledge is expected.
# We will then go on to implementing and testing the regression models on molecules that Miguel described, namely an MLP on fingerprints, RNN/CNN on SMILES strings, and GNNs on the molecular graph.
#
# Tasks which you need to complete I have labelled with **🧪 Task #**, and the missing code location with `# ====== TASK -- FILL IN ======` in the code cell. Completing these will require some basic knowledge of [PyTorch](https://pytorch.org/); if you haven't used this before you can go through the tutorial [here](https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) or read through the [docs](https://pytorch.org/docs/stable/index.html) as and when this is needed (I've tried to add links to relevant parts where appropriate). I wasn't sure of everyone's background, so I apologize if this notebook is either too long or too short. To this end there are some optional tasks (marked with 🕰) which you can do later or simply ignore! I have also tried to include links at the end of each section for further reading. Finally, if you spot any bugs/typos (sorry 😳), please create a GitHub issue or PR and I can fix them!
#
# ## 1. Imports
#
# This section will start by installing any missing packages if you're running in Colab. Then it will import all the required modules needed later. See the repo's README for further details about installation.
#
# ### Install missing packages on Google Colab
#
# If you're running on Colab the cell below should install the missing requirements.
#
# +
# Some standard library imports that we need to run the rest of this cell:
import os
import sys
import requests
import subprocess
import shutil
from logging import getLogger, StreamHandler, INFO

# We are first going to check if we are in Colab.
IN_COLAB = 'google.colab' in sys.modules

# If in Colab then we will install some more packages
if IN_COLAB:
    print("Detected that you're running the notebook in Colab -- "
          "will install missing packages (this can take a few minutes)")
    # !python --version

    # Some missing things which can be obtained through pip
    # !pip install pytorch-ignite
    # !pip install watermark

    # We need to clone our repo and go inside so that we can load our module/dataset
    # ! git clone https://github.com/john-bradshaw/ml-in-bioinformatics-summer-school-2020
    os.chdir('ml-in-bioinformatics-summer-school-2020')

    # We are now going to install RDKit (chemoinformatics toolkit)
    # I've copied the code to do this from: https://gist.github.com/philopon/a75a33919d9ae41dbed5bc6a39f5ede2
    # Which attempts to install it via miniconda with the same Python version as Colab is using and then adding
    # the miniconda site-packages directory to the path.
logger = getLogger(__name__) logger.addHandler(StreamHandler()) logger.setLevel(INFO) def install( chunk_size=4096, file_name="Miniconda3-latest-Linux-x86_64.sh", url_base="https://repo.continuum.io/miniconda/", conda_path=os.path.expanduser(os.path.join("~", "miniconda")), rdkit_version=None, add_python_path=True, force=False): """install rdkit from miniconda ``` import rdkit_installer rdkit_installer.install() ``` """ python_path = os.path.join( conda_path, "lib", "python{0}.{1}".format(*sys.version_info), "site-packages", ) if add_python_path and python_path not in sys.path: logger.info("add {} to PYTHONPATH".format(python_path)) sys.path.append(python_path) if os.path.isdir(os.path.join(python_path, "rdkit")): logger.info("rdkit is already installed") if not force: return logger.info("force re-install") url = url_base + file_name python_version = "{0}.{1}.{2}".format(*sys.version_info) logger.info("python version: {}".format(python_version)) if os.path.isdir(conda_path): logger.warning("remove current miniconda") shutil.rmtree(conda_path) elif os.path.isfile(conda_path): logger.warning("remove {}".format(conda_path)) os.remove(conda_path) logger.info('fetching installer from {}'.format(url)) res = requests.get(url, stream=True) res.raise_for_status() with open(file_name, 'wb') as f: for chunk in res.iter_content(chunk_size): f.write(chunk) logger.info('done') logger.info('installing miniconda to {}'.format(conda_path)) subprocess.check_call(["bash", file_name, "-b", "-p", conda_path]) logger.info('done') logger.info("installing rdkit") subprocess.check_call([ os.path.join(conda_path, "bin", "conda"), "install", "--yes", "-c", "rdkit", "python=={}".format(python_version), "rdkit" if rdkit_version is None else "rdkit=={}".format(rdkit_version)]) logger.info("done") import rdkit logger.info("rdkit-{} installation finished!".format(rdkit.__version__)) install() print("Installed Colab specifics -- now try running the cells below.") else: print("Not in Colab, ensure that you have setup the required packages in an alternative way (e.g. via Conda).") # - # ### Imports # # We can now check that the installation worked correctly by making sure we can import the required Python modules by running the code cells below. # + # = import items from the Python standard library import functools import typing import importlib import itertools import copy # + # = import numpy, matplotlib and other useful common libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.core.debugger import set_trace # for debugging import tabulate import altair as alt from IPython.core.display import display, HTML #alt.renderers.enable('notebook') # - # = import the parts of RDKit that we need import rdkit from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import Draw from rdkit.Chem import PandasTools # + # = import the parts of PyRorch that we need import torch from torch.utils import data from torch import nn from torch.nn import functional as F # - # = import the local Python modules we have written import ss_utils # = print out our environment -- doesn't matter if this cell does not work # %load_ext watermark # %watermark -v -p numpy,scipy,torch -g print(f"RDKit: {rdkit.__version__}") # If that all worked, great! We can move onto the next section where we describe how to get started with RDKit. # ## 2. Manipulating Molecules using RDKit # # This section will describe different ways to represent molecules. 
We reintroduce the SMILES string format (you should have already seen this in Miguel's lecture) and describe how one can read in these strings using RDKit and plot the resulting molecules. # We then go on to show how you can convert SMILES to alternative molecular representations, such as molecular graphs and fingerprints. # # ### 2.1 Reading in SMILES strings # # SMILES (Simplified molecular-input line-entry system) proposed by Weininger (1988) is a popular method to represent molecules as ASCII strings. The string is created by printing out the nodes found on a traversal of the molecular graph. For our purposes it is not particularly important how this traversal happens but note how atoms are represented by their usual element symbol (i.e. carbon by 'C', oxygen by 'O', etc), bonds by the symbols `. - = # $ : / \`, branching by parentheses, and rings by numbers. # # If this is confusing don't worry, this will probably be made clearer by a few examples! Let's start by defining a string variable containing the SMILES representation of the paracetemol (i.e. acetaminophen) molecule, a popular painkiller (although I very much hope this notebook does _not_ require it!), and reading it into RDKit. # + paracetemol_str = 'CC(=O)Nc1ccc(O)cc1' paracetemol_mol = Chem.MolFromSmiles(paracetemol_str) Draw.MolToImage(paracetemol_mol) # - # Note that there is a one-to-many mapping between molecules and their SMILES string representations depending on how one traverses the molecular graph. For instance below we print out 5 different SMILES representations for paracetemol: # + rng = np.random.RandomState(10) # We will then print 5 paracetemol_random_smiles = [ss_utils.random_ordered_smiles(paracetemol_str, rng) for _ in range(5)] print(f"Paracetemol can be represented by any of these SMILES (and others): {', '.join(paracetemol_random_smiles)}.") # - # However, sometimes we want to ensure that we print out a unique SMILES for each molecule, which is useful for instance when comparing two SMILES strings. For this we can use RDKit to compute the _canonical_ SMILES. So in the next cell we read in each of these randomly chosen SMILES for paracetemol back in and convert them to canonical form: set([Chem.MolToSmiles(Chem.MolFromSmiles(smi), canonical=True) for smi in paracetemol_random_smiles]) # ^ set should only have one string in it -- incidently the same as we started with as I began with the canonical representation. # See we have ended up with only one representation, the canonical representation! # # Once we have converted the SMILES to a RDKit `Mol` object (which happened when running `Chem.MolFromSmiles`) we can manipulate it in different ways. For example, we can iterate through the atoms or bonds: # + # Iterate through the atoms. Print their symbol, atomic number, and number of Hydrogens for atm in paracetemol_mol.GetAtoms(): print(f"Atom element: {atm.GetSymbol()}, atomic number: {atm.GetAtomicNum()}, number of hydrogens {atm.GetTotalNumHs()}") print("\n\n") # Iterate through the bonds.. for bnd in paracetemol_mol.GetBonds(): print(f"Bond from {bnd.GetBeginAtomIdx()} to {bnd.GetEndAtomIdx()} and is of type {bnd.GetBondType()}.") # - # Can you match up the two oxygens printed out to the two oxygens in the molecular graph plotted earlier? # If you want you can experiment with obtaining more details about the particular atoms or bonds, the APIs are [here](https://www.rdkit.org/docs/cppapi/classRDKit_1_1Atom.html) and [here](https://www.rdkit.org/docs/cppapi/classRDKit_1_1Bond.html). 
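# As a short aside (not part of the original tutorial), a couple of other commonly used `Atom` methods are shown below: listing an atom's neighbours and checking its aromaticity and ring membership.
for atm in paracetemol_mol.GetAtoms():
    neighbour_symbols = [n.GetSymbol() for n in atm.GetNeighbors()]
    print(f"Atom {atm.GetIdx()} ({atm.GetSymbol()}): neighbours {neighbour_symbols}, "
          f"aromatic: {atm.GetIsAromatic()}, in ring: {atm.IsInRing()}")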
#
# Note that when we iterated through the atoms we also printed the number of hydrogen atoms attached. You may have spotted that these hydrogen atoms were not included in the original SMILES string. In general we ignore the hydrogen atoms (they are treated implicitly), but we can include them in SMILES strings if we want to:
print(Chem.MolToSmiles(paracetemol_mol, allHsExplicit=True))

# We can also print out other string representations. For instance, below we print out the InChI and InChIKey representations (Heller, 2013):
print(Chem.MolToInchi(paracetemol_mol))

print(Chem.MolToInchiKey(paracetemol_mol))

# ### 2.2 Moving beyond character string representations
#
# Of course, as you saw in the lecture earlier, character strings are not the only way to represent molecules. An alternative approach is to represent molecules as molecular fingerprints. For instance, if we want to compute the Morgan fingerprint of the paracetemol molecule we can do so as follows:

# +
# We'll define a function to take in the SMILES string and return the Morgan fingerprint as a numpy array.
# This function is wrapped inside a least recently used (LRU) cache -- you can ignore this,
# it just saves a bit of compute later.
@functools.lru_cache(int(1e6))
def morgan_fp_from_smiles(smiles_str, radius=2, number_bits=1024):
    mol = AllChem.MolFromSmiles(smiles_str)
    # although it is a bit vector, we represent it as a float array in numpy so that it can be fed more easily into a NN later
    return np.array(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=number_bits), dtype=np.float32)

# Having set up this function we can now use it on the paracetemol SMILES string:
print(f"Morgan fingerprint for paracetemol is {morgan_fp_from_smiles(paracetemol_str)}")

# Most of the elements are zero but a few bits will also be set:
print(f"The non-zero indices of the Morgan fingerprint for paracetemol"
      f" are {np.nonzero(morgan_fp_from_smiles(paracetemol_str))}")
# -

# Note that, unlike the mapping from SMILES to molecule, there is not necessarily a one-to-one mapping from fingerprint to molecule.
#
# These fingerprints can be used as feature vectors for ML models (as we shall see later). Fingerprints can also be used to compare molecules and obtain a relatively cheap-to-compute notion of similarity between different molecules. A common way to do this is through the Tanimoto similarity between two fingerprints:
#
# $$
# \textrm{Tanimoto}(\textrm{fp}^1, \textrm{fp}^2) = \frac{\sum_i \textrm{fp}^1_i \wedge \textrm{fp}^2_i}{\sum_i \textrm{fp}^1_i \vee \textrm{fp}^2_i},
# $$
#
# where $\wedge$ and $\vee$ are the "bitwise and" and "bitwise or" operations respectively.
#
# 🧪 **Task 1:** Code up a function that takes in two SMILES strings and computes their Tanimoto similarity. Use this function to look at the similarity between paracetemol and the molecules defined in the list below (feel free to also plot their structures).

# +
def tanimoto_similarity(smiles_str_1, smiles_str_2):
    # ====== TASK -- FILL IN ======

molecules_to_compare = {
    'Glucose': 'OC[C@H]1OC(O)[C@H](O)[C@@H](O)[C@@H]1O',
    'Thiamine (Vitamin B1)': 'Cc1c(sc[n+]1Cc2cnc([nH]c2=N)C)CCO',
    'Ibuprofen': 'CC(C)Cc1ccc(cc1)[C@@H](C)C(=O)O'
}

for name, other_smi in molecules_to_compare.items():
    # ====== TASK -- FILL IN ======
# -

# Which is most similar using this metric? Does this match your intuitions?
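# Below is one possible solution sketch for Task 1, to be treated as a reference rather than the official answer: it reuses the numpy fingerprints from `morgan_fp_from_smiles` above, and the helper name `_tanimoto_sketch` is my own.
# +
def _tanimoto_sketch(smiles_str_1, smiles_str_2):
    # binarize the float fingerprints before taking the bitwise and/or
    fp1 = morgan_fp_from_smiles(smiles_str_1) > 0
    fp2 = morgan_fp_from_smiles(smiles_str_2) > 0
    return np.logical_and(fp1, fp2).sum() / np.logical_or(fp1, fp2).sum()

for name, other_smi in molecules_to_compare.items():
    print(f"Tanimoto(paracetemol, {name}) = {_tanimoto_sketch(paracetemol_str, other_smi):.3f}")
# -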
# # 🕰 **(optional) Task A -- Similarity Maps:** Look at producing a similarity map to visualize the similarity between paracetemol and the other molecules considered. RDKit has built-in functionality to produce these plots, which is documented [here](http://www.rdkit.org/docs/GettingStartedInPython.html#fingerprinting-and-molecular-similarity). # # # Fingerprints are useful as a vector representation that can be used directly with many common ML algorithms. However, it is also useful to have access to a graph representation of the molecule, for use with ML models that work directly on graphs, such as SVMs with graph kernels (Shervashidze, 2011) or graph neural networks (GNNs) (Duvenaud, 2015; Gilmer, 2017). For graph neural networks we need a suitable way to represent graphs as tensors/arrays so that they can be manipulated using deep learning libraries/frameworks. One possible useful array representation is the node feature list and adjacency matrix of a molecule: # + # Compute the node feature list class SymbolFeaturizer: """ Symbol featurizer takes in a symbol and returns an array representing its one-hot encoding. """ def __init__(self, symbols, feature_size=None): self.atm2indx = {k:i for i, k in enumerate(symbols)} self.indx2atm = {v:k for k,v in self.atm2indx.items()} self.feature_size = feature_size if feature_size is not None else len(symbols) def __call__(self, atom_symbol): out = np.zeros(self.feature_size) out[self.atm2indx[atom_symbol]] = 1. return out atm_featurizer = SymbolFeaturizer(['C', 'N', 'O']) node_features = [atm_featurizer(atm.GetSymbol()) for atm in paracetemol_mol.GetAtoms()] node_features = np.array(node_features) print(f"Node features:\n {node_features}") # Compute the adjacency matrix for paracetemol from rdkit num_nodes = paracetemol_mol.GetNumAtoms() adj_mat = np.zeros((num_nodes, num_nodes)) for bnd in paracetemol_mol.GetBonds(): adj_mat[bnd.GetBeginAtomIdx(), bnd.GetEndAtomIdx()] = bnd.GetBondTypeAsDouble() adj_mat[bnd.GetEndAtomIdx(), bnd.GetBeginAtomIdx()] = bnd.GetBondTypeAsDouble() print(f"\n\nAdjacency matrix:\n {adj_mat}") # - # Note that many of the elements of the adjacency matrix are zero -- our graph is sparse. The adjacency matrix is a useful representation in many ways, for instance we can quickly compute the degree of a node by looking along the relebant row. However, when we want to batch up multiple graphs together, using the adjacency matrix representation will require us to add padding to account for different sized graphs. An alternative is to store the edge information as a list of edges as well as a list of edge features. 
So for example with paracetemol we could represent it as: # # \begin{array} # \textrm{edge\_list} = \begin{bmatrix} # [0, 1] \\ # [1, 0] \\ # [1, 2] \\ # [2, 1] \\ # [1, 3] \\ # [3, 1] \\ # [3, 4] \\ # [4, 3] \\ # [4, 5] \\ # [5, 4] \\ # [5, 6] \\ # [6, 5] \\ # [6, 7] \\ # [7, 6] \\ # [7, 8] \\ # [8, 7] \\ # [7, 9] \\ # [9, 7] \\ # [9, 10] \\ # [10, 9] \\ # [10, 4] \\ # [4, 10] \\ # \end{bmatrix} # \end{array} # # \begin{array} # \textrm{edge\_features} = \begin{bmatrix} # 1 \\ # 1 \\ # 2 \\ # 2 \\ # 1 \\ # 1 \\ # 1 \\ # 1 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1 \\ # 1 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # 1.5 \\ # \end{bmatrix} # \end{array} # # # When we have assigned the following indices to the atoms: # # # # # Although, as you'll see later, it does not actually matter in which order we assign indices to the atoms, or the order in which we decide to include the edges -- the GNNs will be invariant to this ordering. Also note that, as the edges are undirected, we include them going both ways. # # # 🧪 **Task 2:** Code up a function to take in a RDKit `Mol` object and return three numpy arrays: the node features array (use the same atom featurizer we defined above), the edge list array and the edge features array. You can index the atoms however you like as long as you are consistent across the three different arrays. # # + def mol_to_edge_list_graph(mol: Chem.Mol, atm_featurizer: SymbolFeaturizer) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Function that takes in a RDKit molecule (of N atoms, E bonds) and returns three numpy arrays: * the node features array (of dtype np.float32, shape [N, d]), which is a one hot encoding of the element of each atom type. * the edge list (of dtype np.int32, shape [2E, 2]) that represents the start and end index of each edge. * the edge feature list (of dtype np.float32, shape [2E, 1]) which describes the feature type associated with each edge. """ # Node features # ====== TASK -- FILL IN ====== # Edge list and edge feature list # ====== TASK -- FILL IN ====== return node_features, edge_list, edge_feature_list node_features, edge_list, edge_feature_list = mol_to_edge_list_graph(paracetemol_mol, atm_featurizer) print(f"Node feature list:\n{node_features}") print(f"\nEdge list:\n{edge_list}") print(f"\nEdge feature list:\n{edge_feature_list}") # - # To check that the code you wrote is working correctly I have written a function that goes the other way (i.e. from these arrays back to a SMILES string). Note that the function you wrote is not reversible for all molecules (e.g. in the graph arrays representation we ignore stereochemistry) but should work okay and act as a useful sanity check with paracetemol. To check your code run the cell below and check that the output is `True`: # + sanity_check_passed = (Chem.MolToSmiles(paracetemol_mol, canonical=True) == ss_utils.graph_as_edge_list_to_canon_smiles(node_features, edge_list, edge_feature_list, atm_featurizer)) sanity_check_passed # - # ### 2.3 Summary and further reading # # This section described how we can represent molecules as fingerprints, SMILES strings and as node features/edge list arrays. In the section that follows we will train ML regressors on each of these representations. 
# # **RDKit** If you want to learn more about how to use RDKit, the RDKit [documentation](http://www.rdkit.org/docs/index.html) is great, in particular this [first rendered notebook](http://www.rdkit.org/docs/GettingStartedInPython.html) covers the majority of the stuff we have gone through here and far more! Also, whilst writing this notebook I spotted [this tweet](https://twitter.com/trjosephson/status/1296108489702424576?s=20), which has collected in its replies a collection of links to excellent resources. In particular, the (GitHub hosted) tutorials from [@PatWalters](https://github.com/PatWalters/workshop) and [@iwatobipen](https://github.com/iwatobipen/py4chemoinformatics) look amazing, and also cover regression on molecules which we shall investigate in the next section. # # We are using RDKit in this notebook as it has lots of well-documented features, works great with Python and is very popular for ML in Chemistry applications. I should also quickly point out that there are [other chemoinformatics toolkits out there](https://en.wikipedia.org/wiki/Cheminformatics_toolkits), which may be useful if you prefer working in other programming languages. Examples that I have used include the [Chemistry Development Kit](https://cdk.github.io/) in Java , the [Indigo Toolkit](https://lifescience.opensource.epam.com/indigo/) with a core written in C++ (with Python bindings), and [OpenBabel](https://github.com/openbabel/openbabel). # # **Fingerprints** Morgan fingerprints (or circular fingerprints (Rogers et al., 2010)) are not the only way to compute fingerprints for molecules. [RDKit](https://www.rdkit.org/docs/GettingStartedInPython.html#list-of-available-fingerprints) has a series of other fingerprinting methods implemented. Choosing fingerprints could be an dataset dependent decision, although Rinker and Landrum (2013) find that many popular fingerprints offer fairly similar performance in downstream tasks. # ## 3 Molecule Regression # # In this section we will go through four main models for performing regression on molecules: a basic feedforward NN (neural network) on fingerprints, a RNN (recurrent neural network) on SMILES strings, a convolutional neural network on SMILES strings, and a GNN on the the molecular graph representation. Before starting on this we will introduce a simple solubility dataset to benchmark these models on. This dataset is taken from (Duvenaud et al, 2015, §4.2) [from their code repo](https://github.com/HIPS/neural-fingerprint/tree/master/data/2015-05-24-delaney)), which in turn originally obtained it from (Delaney, 2004). # # ### 3.1 Solubility dataset # # We can load in the data using Pandas: # df = pd.read_csv('data/delaney-processed.csv') df.head() # We can also use Pandas to quickly summarize the data: df.describe() # The variable which we want to predict is in the "measured log solubility in mols per litre" column # # 🧪 **Task 3:** Plot a histogram of these measured log solubility values. # # + # ====== TASK -- FILL IN ====== # - # Note that RDKit includes a series of useful tools for working with Pandas, which are [documented here](http://rdkit.org/docs/source/rdkit.Chem.PandasTools.html). 
For instance, we can include the relevant molecule in each row of the dataframe: PandasTools.AddMoleculeColumnToFrame(df,'smiles','Molecule',includeFingerprints=False) df.head() # Before moving on, we'll split this dataframe into train and validation dataframes: # + def split_into_train_and_val_dfs(df: pd.DataFrame, train_proportion:float, rng: typing.Optional[np.random.RandomState]=None ) -> typing.Tuple[pd.DataFrame, pd.DataFrame]: """ splits this dataset into two: a training portion and a validation portion. :param df: Pandas Dataframe to split up. :param train_proportion: proportion to use as training dataset """ # If not passed in a random number generator create one here rng = np.random.RandomState(42) if rng is None else rng # Convert the proportion into a dataset size assert 0. <= train_proportion <= 1. number_for_training = int(np.ceil(train_proportion * df.shape[0])) # Create a random shuffling of the data perm = rng.permutation(df.shape[0]) # split up into the appropriate sizes train_df = df.iloc[perm[:number_for_training]].copy() val_df = df.iloc[perm[number_for_training:]].copy() return train_df, val_df train_df, val_df = split_into_train_and_val_dfs(df, 0.9) print("Shapes are: ", train_df.shape, val_df.shape) # - # ### 3.2 Training a NN on fingerprints # # Having set up our dataset we are now ready to create our first model! We first will train a regular feed forward NN on the fingerprints. I have written in the `ss_utils` module the training code for you, as the function `train_neural_network`. I highly encourage you to read over this function to see what is happening. As well as the data to train/validate on, this function requires the network to train as well as a function that converts SMILES strings into the required tensor to put into the network. # # # 🧪 **Task 4:** Code up a 2-layer Pytorch NN that operates on 1024 dimensional fingerprint features and predicts the solubility. If you haven't used PyTorch before then you may find the [following documentation helpful](https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html). This neural network should be a subclass of `nn.Module`. You can pick whatever hidden dimensionality/hidden activation function you like. # # # # # # # + # Write a simple (1-hidden layer NN) in PyTorch ff_nn: nn.Module = # ====== TASK -- FILL IN ====== # We also will need to add a transform for our datasets such that # the network gets fed in fingerprint tensors rather than SMILES strings def transform_ff_nn(smiles: str) -> torch.Tensor: fp = morgan_fp_from_smiles(smiles) tensor_fp = torch.tensor(fp, dtype=torch.float32) return tensor_fp # - # Having defined the network we can now train it: # + # A quick check that the ff_nn has been subclassed from nn.Module correctly assert isinstance(ff_nn, nn.Module), "The function you have written should be a subclass of nn.Module" # Then we train and evaluate out = ss_utils.train_neural_network(train_df, val_df, "smiles", "measured log solubility in mols per litre", transform_ff_nn, ff_nn) # Finally we print out as a table some of the results. display(HTML(tabulate.tabulate(out['out_table'], tablefmt="html"))) # - # We can plot these training and validation losses to better understand how training went (if the network has not converged feel free to run training for longer): # + # We'll plot using Matplotlib and Altair. # Altair is interactive which is nice, although I think matplotlib makes better static images when saving # this notebook to GitHub. 
ss_utils.plot_train_and_val_using_mpl(out['train_loss_list'], out['val_lost_list']) ss_utils.plot_train_and_val_using_altair(out['train_loss_list'], out['val_lost_list']) # - # We can also add our predictions to the dataframe such that we can compare them more easily with the ground truth: # + val_df['NN FP predictions'] = out['val_predictions'] val_df.head() # - # 🕰 **(optional) Task C -- Different Feedforward NNs/Fingeprint Sizes:** Here we used 1024 dimensional features and a simple 2 layer NN. Explore using different dimensional fingerprints and different network architectures. As you use smaller fingerprints one bit may correspond to more substructures. # How about trying also some [other regression models](https://scikit-learn.org/stable/supervised_learning.html#supervised-learning) or [other descriptors](https://www.rdkit.org/docs/source/rdkit.Chem.Descriptors.html)? # ### 3.3 Sequence based methods on SMILES string # # As discussed in Miguel's lecture an alternative to fingerprints is to run a sequence based model directly on the SMILES strings. Two ways to do this are using RNNs and CNNs, which we are going to code up in this subsection. These models both break up the sequence into a series of symbols: # # # # (taken from [Miguel's slides](http://mlss.ii.uam.es/mlss2018/speakers.html)) # # We can then in turn represent each of these symbols in tensor form by using one-hot encodings. We'll therefore start by writing the transform to do this before writing the CNN and RNN models. # # 🧪 **Task 5:** Write the code to take in a SMILES string and convert it to a tensor where each row is each symbol's one-hot encoding. So for instance acetaldehyde, CC=O, should be represented as: # # \begin{bmatrix} # [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., # 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\\ # [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., # 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\\ # [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., # 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\\ # [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\\ # [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\\ # \ldots # \end{bmatrix} # # Padding will be needed at the end so that all sequences have the same length (you may find the [PyTorch function](https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad) `F.pad` useful in this regard). You should use the `SymbolFeaturizer` class from earlier to compute the one-hot encodings. # This code will be used to transform the data for the RNN/CNN models in a similar way the function `transform_ff_nn` did for the feedforward NN model above. 
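# A minimal sketch (not the official Task 5 solution) of the kind of transform being asked for, assuming a `SymbolFeaturizer`-style callable and a maximum sequence length like the ones constructed in the next cell; the helper name `smiles_to_one_hot_sketch` is my own.
def smiles_to_one_hot_sketch(smiles: str, featurizer, max_len: int) -> torch.Tensor:
    # one-hot encode each symbol, then pad extra rows of zeros up to max_len
    one_hots = np.stack([featurizer(symbol) for symbol in smiles])  # [seq_len, vocab_size]
    tensor = torch.tensor(one_hots, dtype=torch.float32)
    return F.pad(tensor, (0, 0, 0, max_len - tensor.shape[0]))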
# + # This will calculate the maximum SMILES string size in our data, which is the # same as the length we should ensure all tensors are padded to so that they can # be batched: max_seq_size = df['smiles'].map(len).max() # This will calculate all the possible symbols in the smiles string: symbols_in_smiles = set(itertools.chain(*df['smiles'].tolist())) symbols_in_smiles = sorted(list(symbols_in_smiles)) # ...which we can then use to create a symbol featurizer symbol_featurizer = SymbolFeaturizer(symbols_in_smiles) # + class TransformSeqModel: def __init__(self, symbol_featurizer, max_seq_size): self.symbol_featurizer = symbol_featurizer self.max_seq_size = max_seq_size def __call__(self, smiles: str) -> torch.Tensor: """ Transforms SMILES strings into one-hot encodings of each symbol present. :returns: a tensor (dtype=torch.float32) of size [max_seq_size, one_hot_encoding_size] """ # ====== TASK -- FILL IN ====== transform_seq_model = TransformSeqModel(symbol_featurizer, max_seq_size) # You can check the function is correct by eyeballing the output for acetaldehyde # (alternatively you could write a function that converts back to SMILES to check for you) torch.set_printoptions(profile="full") torch.nonzero(transform_seq_model("CC=O")) # - # Having coded up a suitable transform we are left with creating the model! Let's start first with the RNN and then move onto the CNN. # # 🧪 **Task 6:** Complete the code below for a RNN to run on this sequence of one-hot encodings. For instance, you could use either a [GRU](https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU) or [LSTM](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html?highlight=lstm#torch.nn.LSTM). To form final predictions, project down the final hidden layer of the RNN to a one dimensional space using a linear layer. Be careful, the RNN will recieve the sequences _batch first_. # # + class RNNModel(nn.Module): def __init__(self, symbol_vocab_size: int): """ :param symbol_vocab_size: the dimension of the one-hot vectors, ie how many symbols we have in total (and also the initial channel size being fed into the RNN.) """ super().__init__() h_size = # ====== TASK -- FILL IN ====== (hidden size) self.rnn = # ====== TASK -- FILL IN ====== self.lin = nn.Linear(h_size, 1) def forward(self, x): """ :param x: tensor [batch_size, seq_size, symbol_vocab_size] :returns: tensor [batch_size, 1] """ _, out = # ====== TASK -- FILL IN ====== # out = [num_layers, batch_size, hidden_size] out = out[-1] # [batch_size, hidden_size] out = self.lin(out) # [batch_size, 1] return out rnn_model = RNNModel(len(symbols_in_smiles)) # - # We can check this model works by running it forward on two acetaldehydes batched up: # (this should produce a two element column vector) rnn_model(torch.stack([transform_seq_model("CC=O"), transform_seq_model("CC=O")])) # This done, we can train and evaluate our new model: # + # Train and evaluate out = ss_utils.train_neural_network(train_df, val_df, "smiles", "measured log solubility in mols per litre", transform_seq_model, rnn_model) # And then we print out as a table some of the results. 
display(HTML(tabulate.tabulate(out['out_table'], tablefmt="html"))) # - # We can again plot the train/validation loss curves and check for too little training time or any overfitting: # + ss_utils.plot_train_and_val_using_mpl(out['train_loss_list'], out['val_lost_list']) ss_utils.plot_train_and_val_using_altair(out['train_loss_list'], out['val_lost_list']) # - # This model can actually be harder to tune and choose sensible hyperparameters for than the simple feedforward neural network that we considered before. It is therefore useful to get a good baseline for performance, for instance by looking at the loss obtained by predicting the mean of the training set everywhere: # Your model's loss should hopefully be lower than this dummy baseline's loss: np.mean((val_df['measured log solubility in mols per litre'].values - train_df['measured log solubility in mols per litre'].mean())**2) # Did your model do better? If not, you probably want to go back and tune the RNN hyperparameters until it does. How do the different hyperparameters of the model affect performance? # # When you've finished exploring that, let's move on to creating the convolutional neural network (CNN). This network, shown at the bottom of Miguel's slide above, will operate on the same input as the RNN, so we can reuse the same transform code (i.e. `TransformSeqModel`) and only need to write the new model code. # # 🧪 **Task 7:** Code up the CNN. PyTorch has `nn.Conv1d`, `nn.Conv2d` and `nn.Conv3d` layers; read the [documentation](https://pytorch.org/docs/stable/nn.html#convolution-layers) and work out which one is appropriate here. Your model should consist of one of these convolutional layers followed by a pooling layer and then a linear projection. # # + class CNNModel(nn.Module): def __init__(self, symbol_vocab_size: int, seq_len: int): """ :param symbol_vocab_size: the size of the one-hot vectors (also the initial channel size for the CNN) :param seq_len: the size of all sequences. """ super().__init__() # ====== TASK -- FILL IN ====== def forward(self, x): """ :param x: tensor [batch_size, seq_size, symbol_vocab_size (channel dim)] :returns: tensor [batch_size, 1] """ # ====== TASK -- FILL IN ====== return out cnn_model = CNNModel(len(symbols_in_smiles), max_seq_size) # - # We can again quickly check that the model works before it is trained by running it forward once. cnn_model(torch.stack([transform_seq_model("CC=O"), transform_seq_model("CC=O")])) # Having created the model, we can now train it in the same way as we did for the RNN: # + # Then we train and evaluate out = ss_utils.train_neural_network(train_df, val_df, "smiles", "measured log solubility in mols per litre", transform_seq_model, cnn_model) # And then we print out as a table some of the results. display(HTML(tabulate.tabulate(out['out_table'], tablefmt="html"))) # + # and we can plot the loss curves: ss_utils.plot_train_and_val_using_mpl(out['train_loss_list'], out['val_lost_list']) ss_utils.plot_train_and_val_using_altair(out['train_loss_list'], out['val_lost_list']) # - # What pooling operation works best? How does performance in terms of loss, number of parameters and timings compare to the RNN? Does adding additional convolutional layers change this? # # 🕰 **(optional) Task C -- Augmented Sequences:** Earlier in this notebook we discussed how each molecule corresponds to many SMILES strings. However, in this section we have only fed into the networks one of the possible SMILES strings for each molecule. Consider training the sequence models in this section on an augmented dataset (i.e. create a transform that randomly chooses a different SMILES representation each time it is called), and see how this affects the final model's performance. You can also implement a method to pool predictions made using the different SMILES representations at inference time. Bjerrum (2017) explores this idea. Do you obtain similar results? A hedged sketch of such an augmenting transform is given after this paragraph.
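# One possible way to build such an augmenting transform is sketched below. It assumes RDKit's `Chem.MolToSmiles(..., doRandom=True)` flag (available in recent RDKit releases), which writes the molecule with a random atom ordering on each call; the wrapper class name and wiring are illustrative only and not part of the course code. Note that a randomly written SMILES can be longer than the canonical one, so `max_seq_size` may need some slack.
# +
from rdkit import Chem

class RandomSmilesWrapper:
    """Wraps another transform so that each call sees a randomly written SMILES string."""
    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, smiles: str):
        mol = Chem.MolFromSmiles(smiles)
        randomized = Chem.MolToSmiles(mol, canonical=False, doRandom=True)
        return self.base_transform(randomized)

# e.g. (reusing the transform_seq_model defined above):
# augmented_transform = RandomSmilesWrapper(transform_seq_model)
# -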
# # 🕰 **(optional) Task D -- Symbol Embeddings:** We fed in as features to the RNN/CNNs one-hot encodings of each symbol. Consider using [learnt embeddings instead](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html) (the `nn.Embedding` class may be useful here). How does this affect performance? How many extra parameters does this result in? How does this modification compare to just adding an additional layer to your network, and why? # Furthermore, as an additional related extension, can you group together some of the current symbols to reduce sequence sizes and the number of embeddings needed (i.e. currently we represent bromine as 'B', 'r'; can you instead ensure it is kept as one symbol, 'Br')? You may find the following regex (taken from Schwaller et al. (2019)) useful: # `"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"` # # # # ### 3.4 Graph Neural Networks # # The final model we will consider here is a basic graph neural network. # # As you saw in Section 2 of this notebook, graphs require several tensors to represent them. Therefore, we will begin by creating a datastructure to keep these tensors together as well as to allow the easy batching of several graphs together (so that we can train our network using minibatches). We will then go on to creating the network itself before trying it out on the same small solubility dataset that the other methods have been running on. # # # 🧪 **Task 8:** Complete the `Graphs` class below. To be more specific, you need to fill in the class method `concatenate`, which, given a list of individual graphs, returns a new instance of the class with them all batched together. # # # + class Graphs: ATOM_FEATURIZER = SymbolFeaturizer(['Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cl', 'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Eu', 'F', 'Fe', 'Ga', 'Ge', 'H', 'He', 'Hf', 'Hg', 'I', 'In', 'Ir', 'K', 'La', 'Li', 'Mg', 'Mn', 'Mo', 'N', 'Na', 'Nd', 'Ni', 'O', 'Os', 'P', 'Pb', 'Pd', 'Pr', 'Pt', 'Rb', 'Re', 'Rh', 'Ru', 'S', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Te', 'Ti', 'Tl', 'V', 'W', 'Xe', 'Y', 'Yb', 'Zn', 'Zr']) # ^ you can change the number of symbols here to play with the dimensionality, # we only need to have the symbols: ['Br', 'C', 'Cl', 'F', 'I', 'N', 'O', 'P', 'S'] BOND_FEATURIZER = SymbolFeaturizer([1., 1.5, 2., 3.]) # ^ single, aromatic, double and triple bonds (see earlier how RDKit represents these as doubles.) def __init__(self, node_features: torch.Tensor, edge_list: torch.Tensor, edge_features: torch.Tensor, node_to_graph_id: torch.Tensor): """ A graph datastructure which groups together the series of tensors that represent the graph. Note that this datastructure also holds multiple molecule graphs as one large disconnected graph -- the nodes belonging to each molecule are described by node_to_graph_id.
## Further details on the individual tensors Say this graph represents acetone, CC(=O)C, and ethane, CC, and we're using a simple three-dimensional one-hot encoding for 'C', 'O' and 'N' and a simple two-dimensional one-hot encoding for the bonds 'SINGLE' and 'DOUBLE', then the resulting tensors would look like: node_features = [[1. 0. 0.], [1. 0. 0.], [0. 1. 0.], [1. 0. 0.], [1. 0. 0.], [1. 0. 0.]] edge_list = [[0 1], [1 0], [1 2], [2 1], [1 3], [3 1], [4 5], [5 4]] edge_features = [[1. 0.], [1. 0.], [0. 1.], [0. 1.], [1. 0.], [1. 0.], [1. 0.], [1. 0.]] node_to_graph_id = [0 0 0 0 1 1] More generally we expect the different tensors to have the following datatypes and shapes (below N is the number of nodes, E the number of edges, h_n the feature dimensionality of node features and h_e the feature dimensionality of edge features): :param node_features: Tensor (dtype float32 , shape [N, h_n]) :param edge_list: Tensor (dtype int64 , shape [E, 2]) :param edge_features: Tensor (dtype float32 , shape [E, h_e]) :param node_to_graph_id: Tensor (dtype int64 , shape [N]) this contains for each node the associated graph it belongs to. So for instance if this Graphs datastructure represented only one graph this should be all zeros, however if two then it should be zeros for the nodes corresponding to the first graph and then 1s for the second graph. Graph ids should start at zero and consist of consecutive integers. """ self.node_features = node_features self.edge_list = edge_list self.edge_features = edge_features self.node_to_graph_id = node_to_graph_id def to(self, *args, **kwargs): """ Works in a similar way to the Tensor function torch.Tensor.to(...) and performs dtype and/or device conversion for the entire datastructure """ new_graph = type(self)(self.node_features.to(*args, **kwargs), self.edge_list.to(*args, **kwargs), self.edge_features.to(*args, **kwargs), self.node_to_graph_id.to(*args, **kwargs) ) return new_graph @classmethod def from_smiles_string(cls, smiles_str: str): """ Converts a SMILES string into the representation required by this datastructure. Making use of the code you wrote in Section 2! """ # Convert to form we need using previous code: mol = Chem.MolFromSmiles(smiles_str) node_features, edge_list, edge_features = mol_to_edge_list_graph(mol, cls.ATOM_FEATURIZER) edge_features = [cls.BOND_FEATURIZER(elem) for elem in edge_features] # ^ nb here we're converting the edge feature list into one-hot form # Convert to tensors: node_features = torch.tensor(node_features, dtype=torch.float32) edge_list = torch.tensor(edge_list, dtype=torch.int64) edge_features = torch.tensor(edge_features, dtype=torch.float32) node_to_graph_id = torch.zeros(node_features.shape[0], dtype=torch.int64) # ^we only (currently) have one molecule per SMILES so all the nodes can be assigned # the same id return cls(node_features, edge_list, edge_features, node_to_graph_id) @property def num_graphs(self): return torch.unique(self.node_to_graph_id).shape[0] @classmethod def concatenate(cls, list_of_graphs): """ This takes in a list of objects of this class and joins them to form one large disconnected graph. For instance say we have two individual `Graphs` instances, one for acetone (CC(=O)C) and one for ethane (CC), they might look like this (in pseudocode -- note also that in practice our one-hot encoding is larger): acetone = Graphs( node_features = [[1. 0. 0.], [1. 0. 0.], [0. 1. 0.], [1. 0. 0.]] edge_list = [[0 1], [1 0], [1 2], [2 1], [1 3], [3 1]] edge_features = [[1. 0.], [1. 0.], [0. 1.], [0. 1.], [1. 0.], [1.
0.]] node_to_graph_id = [0 0 0 0] ) ethane = Graphs( node_features = [[1. 0. 0.], [1. 0. 0.]] edge_list = [[0 1], [1 0]] edge_features = [[1. 0.], [1. 0.]] node_to_graph_id = [0 0] ) and this function would transform them into one large disconnected graph minibatch_of_graphs = Graphs( node_features = [[1. 0. 0.], [1. 0. 0.], [0. 1. 0.], [1. 0. 0.], [1. 0. 0.], [1. 0. 0.]] edge_list = [[0 1], [1 0], [1 2], [2 1], [1 3], [3 1], [4 5], [5 4]] edge_features = [[1. 0.], [1. 0.], [0. 1.], [0. 1.], [1. 0.], [1. 0.], [1. 0.], [1. 0.]] node_to_graph_id = [0 0 0 0 1 1] ) """ # ====== TASK -- FILL IN ====== new_concatenated_graph = cls(node_features=new_node_features, edge_list=new_edge_lists, edge_features=new_edge_features, node_to_graph_id=new_new_node_ids) return new_concatenated_graph # We're now going to create an instance of this class and test the concatenate function # (you could also write a reverse function if you wanted to be sure your code worked correctly) acetone_g = Graphs.from_smiles_string('CC(=O)C') ethane_g = Graphs.from_smiles_string('CC') graph_of_both = Graphs.concatenate([acetone_g, ethane_g]) print(f"graph_of_both.node_features:\n{graph_of_both.node_features}\n\n") print(f"graph_of_both.edge_list:\n{graph_of_both.edge_list}\n\n") print(f"graph_of_both.edge_features:\n{graph_of_both.edge_features}\n\n") print(f"graph_of_both.node_to_graph_id:\n{graph_of_both.node_to_graph_id}\n\n") # - # Having created the `Graphs` datastructure we can now create our graph neural network (GNN) which gets fed in the `Graphs` class as input. At a high level, the GNN consists of a series of message passing steps, which update the node features. After computing richer node features in this manner, a graph-level representation is computed through a weighted sum of the node features, in a process described as an aggregation transformation (Johnson, 2017) (also often called a readout step (Gilmer et al.,2017)). Finally, this graph-level representation is projected down by a linear transform to predict the solubility property score. # # We are going to base the specifics of our GNN implementation on Gated Graph Neural Networks (Li et al., 2015), with the update function performed using a GRU. We will ignore implementing a global state or distinguishing the messages based on edge type -- these will be left as possible extensions to you! To be more specific, the node representations $\mathbf{m}_v^t$ for node $v$ at propagation step $t$ will be updated by: # # \begin{array} # \mathbf{m}_v^{t} = \textrm{GRU}(\sum_{j \in \mathcal{N}(v)} \mathbf{W} \mathbf{m}_j^{t-1}, \mathbf{m}_v^{t-1}), \tag{1} # \end{array} # # where $W$ is a learnt weight matrix, $\mathcal{N}(v)$ denotes the neighbour nodes of $v$, and $\textrm{GRU}$ refers to a Gated Recurrent Unit RNN (Cho et al., 2014). $\mathbf{m}_v^0$ will be set as the one-hot encodings of the atom type and we will proceed for $T$ iterations. Pictorially this process is shown below: # # # # # # We compute a graph level embedding, $\mathbf{g}$, for graph $\mathcal{G}$ through the aggregation function as follows (altogether this can be thought of as a learnable fingerprint function): # # \begin{array} # \mathbf{g} = \sum_{v \in \mathcal{G}} \sigma\left(f_\textrm{attn}(\mathbf{m}_v^T)\right) f_\textrm{proj}(\mathbf{m}_v^T), \tag{2} # \end{array} # # where $\sigma(\cdot)$ is the sigmoid function, and $f_\textrm{attn}(\cdot)$ and $f_\textrm{proj}(\cdot)$ are two linear projections; the sum is over all of the nodes (i.e. atoms) in the graph. 
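# The sums in equations (1) and (2) are scatter-adds: rows of a tensor are summed into slots picked out by an integer index. Before filling in the GNN below, it may help to see `Tensor.index_add_` on its own; the numbers here are a toy example and not part of the GNN itself:
# +
import torch

node_vecs = torch.tensor([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
node_to_graph = torch.tensor([0, 0, 1, 1])             # first two nodes -> graph 0, the rest -> graph 1

per_graph_sum = torch.zeros(2, 2)
per_graph_sum.index_add_(0, node_to_graph, node_vecs)  # args: dim, index, source
print(per_graph_sum)                                   # tensor([[3., 3.], [7., 7.]])
# -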
Pictorially this looks like: # # # # # 🧪 **Task 9:** Fill in the missing code in the GNN implementation below. You can see the missing pieces from the comments. # + class GNN(nn.Module): def __init__(self, node_feature_dimension, num_propagation_steps: int = 4): super().__init__() self.num_propagation_steps = num_propagation_steps # called T above. # Our sub modules: self.message_projection = nn.Linear(node_feature_dimension, node_feature_dimension, bias=False) self.update_gru = nn.GRUCell(input_size=node_feature_dimension, hidden_size=node_feature_dimension, bias=True) self.attn_net = nn.Linear(node_feature_dimension, 1) self.proj_net = nn.Linear(node_feature_dimension, node_feature_dimension) self.final_lin = nn.Linear(node_feature_dimension, 1) def forward(self, graphs_in: Graphs): """ Produces a column vector of predictions, with each element in this vector a prediction for each individual graph in `graphs_in`. In the comments below N is the number of nodes in graphs_in (across all graphs), d the feature dimension, and G is the number of individual molecular graphs. """ # 1. Message passing and updating m = graphs_in.node_features # shape: [N, d] for t in range(self.num_propagation_steps): projs = self.message_projection(m) # [N, d] # Update the node embeddings (eqn 1 above) # 1a. compute the sum for each node msgs = torch.zeros_like(m) # [N, d] msgs.index_add_(# ====== TASK -- FILL IN ======) # 1b. update the embeddings via GRU cell m = self.update_gru(msgs, m) # [N, d] # 2. Aggregation (eqn 2 above) # 2a. compute weighted embeddings attn_coeffs = torch.sigmoid(self.attn_net(m)) # [N, 1] proj_embeddings = self.proj_net(m) # [N, d'] weighted_embeddings = attn_coeffs * proj_embeddings # 2b. perform the sum graph_embedding = torch.zeros(graphs_in.num_graphs, weighted_embeddings.shape[1], device=m.device, dtype=m.dtype) graph_embedding.index_add_(# ====== TASK -- FILL IN ======) # [G, d'] # 3. Final linear projection. final_prediction = self.final_lin(graph_embedding) # [G, 1] return final_prediction gnn = GNN(len(Graphs.ATOM_FEATURIZER.indx2atm)) # - # Before we can run our training loop with our new model we need to tell the PyTorch `DataLoader` (used in `ss_utils`) how to collate the graphs together when forming minibatches (the main machinery of how this happens you have already written in Task 8 above). def collate_for_graphs(batch): """ This is a custom collate function for forming minibatches of graphs along with their regression values. It ensures that we concatenate graphs correctly. Look at ss_utils to see how this gets used. """ # Split up the graphs and the y values list_of_graphs, list_of_targets = zip(*batch) list_of_graphs = list(list_of_graphs) list_of_targets = list(list_of_targets) # The graphs need to be concatenated (i.e. collated) using the function you wrote graphs = Graphs.concatenate(list_of_graphs) # The y values can use the default collate function as before. targets = data.dataloader.default_collate(list_of_targets) return graphs, targets # And now we can train! # + # Then we train and evaluate out = ss_utils.train_neural_network(train_df, val_df, "smiles", "measured log solubility in mols per litre", transform=Graphs.from_smiles_string, neural_network=gnn, collate_func=collate_for_graphs) # And then we print out as a table some of the results.
display(HTML(tabulate.tabulate(out['out_table'], tablefmt="html"))) # + # and we can plot the loss curves: ss_utils.plot_train_and_val_using_mpl(out['train_loss_list'], out['val_lost_list']) ss_utils.plot_train_and_val_using_altair(out['train_loss_list'], out['val_lost_list']) # - # How did it perform? How did the number of parameters compare to that of the alternative methods? Is this what you expected? # # 🕰 **(optional) Task E -- GNN Modeling Choices:** There are quite a few design choices one can make when deciding on the architecture of a GNN (Gilmer et al., 2017; Brockschmidt, 2019). Play around with some of these choices. For instance: # * Currently the edge feature information is ignored. Consider instead using this information to adapt the $W$ matrix based on the associated edge type. # * What is the effect of using different initial atom features? For example, could you add the valency and hydrogen count to the element one-hot encoding (Duvenaud et al., 2015, §4.2)? # * Consider the addition of virtual edges between all nodes or a master virtual node such that messages can be passed more easily between nodes that are far apart (Gilmer et al., 2017, §5.2). # * Consider using attention (Veličković et al., 2017) in the sum when computing messages (Eqn. 1) to downweight/upweight the contribution of different neighbours. # # # 🕰 **(optional) Task F -- Combining Representations:** We have now gone through several alternative methods to compute vector representations of molecules. However, these can also be combined; for instance, you could concatenate fingerprints and the GNN output to form a graph-level representation (Yang et al., 2019, p. 3373). Try doing this and exploring different ways to combine features from the different models. # # # # ### Section 3.5 Summary and further reading # # In this section we looked at how to code up different regression models for molecules. In particular, we looked at NNs on fingerprints, CNNs and RNNs on SMILES strings, and GNNs on molecular graphs. We tried to get some intuition for their performance and compute characteristics and offered pointers to how the basic implementations of each could be extended. I should quickly point out that we have not performed any extensive hyperparameter/architecture search or evaluated the models on more complicated datasets, both of which would be required in a more comprehensive comparison. If you wish to do this you could follow up on some of the extension tasks and consider some of the architectures/datasets used in the references provided! # # GNNs (and related models) have quite a long history in chemoinformatics, e.g. (Kireev, 1995; Merkwirth and Lengauer, 2005). They have also picked up a lot of interest recently in the ML community, where they have also been used to represent many other kinds of data such as physical systems, citation networks, knowledge graphs, or social networks (e.g. see (Kipf and Welling, 2017; Battaglia et al., 2016; Hu et al., 2020; Dai et al., 2017; Hamilton et al., 2017; Bronstein et al., 2016) and the references therein). Battaglia et al. (2018), especially §3-4, is well worth reading, as it unifies some of the existing approaches, as does Gilmer et al. (2017). # # In this notebook we presented one pattern for implementing graph neural networks based on maintaining an edge list. This enabled the easy batching together of multiple graphs with different numbers of nodes (through the `concatenate` function you wrote); a toy illustration of the index bookkeeping this involves is sketched below.
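# A toy illustration (with made-up numbers, not using the notebook's Graphs class) of the bookkeeping behind that batching: node features are simply stacked, while edge indices and graph ids are shifted by the number of nodes and graphs seen so far.
# +
import torch

nodes_a, edges_a = torch.randn(4, 3), torch.tensor([[0, 1], [1, 0]])
nodes_b, edges_b = torch.randn(2, 3), torch.tensor([[0, 1], [1, 0]])

batched_nodes = torch.cat([nodes_a, nodes_b], dim=0)                 # [6, 3]
batched_edges = torch.cat([edges_a, edges_b + nodes_a.shape[0]], 0)  # second graph's nodes now start at index 4
node_to_graph_id = torch.cat([torch.zeros(4, dtype=torch.long),
                              torch.ones(2, dtype=torch.long)])
print(batched_edges)
print(node_to_graph_id)
# -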
However, alternative patterns also have their own pros/cons; for instance, you can store graphs as adjacency matrices and do the message passing step by matrix multiplication; you can find my code for playing around with such ideas [here](https://github.com/john-bradshaw/GNN). If you want well-documented, well-maintained libraries for working with GNNs, I recommend checking out [PyTorch Geometric](https://github.com/rusty1s/pytorch_geometric) or the [Deep Graph Library (DGL)](https://github.com/dmlc/dgl). You can see recent talks about what is new in these libraries at this [recent ICML workshop (time 3:42:42)](https://icml.cc/virtual/2020/workshop/5715). # ## References # # Weininger, D. (1988) ‘SMILES, a chemical language and information system. 1. Introduction to methodology and encoding rules’, Journal of chemical information and computer sciences, 28(1), pp. 31–36. # # Heller, S. et al. (2013) ‘InChI - the worldwide chemical structure identifier standard’, Journal of cheminformatics, 5(1), p. 7. # # Shervashidze, N. et al. (2011) ‘Weisfeiler-Lehman graph kernels’, Journal of machine learning research: JMLR, 12(9). Available at: https://jmlr.csail.mit.edu/papers/volume12/shervashidze11a/shervashidze11a.pdf. # # Duvenaud, D. et al. (2015) ‘Convolutional Networks on Graphs for Learning Molecular Fingerprints’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1509.09292. # # Rogers, D. and Hahn, M. (2010) ‘Extended-connectivity fingerprints’, Journal of chemical information and modeling, 50(5), pp. 742–754. # # Gilmer, J. et al. (2017) ‘Neural Message Passing for Quantum Chemistry’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1704.01212. # # Riniker, S. and Landrum, G. A. (2013) ‘Open-source platform to benchmark fingerprints for ligand-based virtual screening’, Journal of cheminformatics, 5(1), p. 26. # # Bjerrum, E. J. (2017) ‘SMILES Enumeration as Data Augmentation for Neural Network Modeling of Molecules’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1703.07076. # # Schwaller, P. et al. (2019) ‘Molecular Transformer: A Model for Uncertainty-Calibrated Chemical Reaction Prediction’, ACS central science, 5(9), pp. 1572–1583. # # Johnson, D. D. (2017) ‘Learning Graphical State Transitions’, in ICLR. Available at: https://openreview.net/pdf?id=HJ0NvFzxl (Accessed: 2 August 2018). # # Li, Y. et al. (2015) ‘Gated Graph Sequence Neural Networks’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1511.05493. # # Cho, K. et al. (2014) ‘Learning Phrase Representations using RNN Encoder--Decoder for Statistical Machine Translation’, in Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1724–1734. # # Veličković, P. et al. (2017) ‘Graph Attention Networks’, arXiv [stat.ML]. Available at: http://arxiv.org/abs/1710.10903. # # Brockschmidt, M. (2019) ‘GNN-FiLM: Graph Neural Networks with Feature-wise Linear Modulation’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1906.12192. # # Yang, K. et al. (2019) ‘Analyzing Learned Molecular Representations for Property Prediction’, Journal of chemical information and modeling, 59(8), pp. 3370–3388. # # Kireev, D. B. (1995) ‘ChemNet: A Novel Neural Network Based Method for Graph/Property Mapping’, Journal of chemical information and computer sciences, 35(2), pp. 175–180. # # Merkwirth, C. and Lengauer, T. (2005) ‘Automatic generation of complementary descriptors with molecular graph networks’, Journal of chemical information and modeling, 45(5), pp. 1159–1168. # # Kipf, T. N. and Welling, M.
(2017) ‘Semi-Supervised Classification with Graph Convolutional Networks’, in ICLR. Available at: http://arxiv.org/abs/1609.02907. # # Hamilton, W. L. et al. (2017) ‘Representation Learning on Graphs: Methods and Applications’, arXiv [cs.SI]. Available at: http://arxiv.org/abs/1709.05584. # # Battaglia, P. W. et al. (2018) ‘Relational inductive biases, deep learning, and graph networks’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1806.01261. # # Battaglia, P. W. et al. (2016) ‘Interaction Networks for Learning about Objects, Relations and Physics’, arXiv [cs.AI]. Available at: http://arxiv.org/abs/1612.00222. # # Hu, W. et al. (2020) ‘Open Graph Benchmark: Datasets for Machine Learning on Graphs’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/2005.00687. # # Dai, H. et al. (2017) ‘Learning Combinatorial Optimization Algorithms over Graphs’, arXiv [cs.LG]. Available at: http://arxiv.org/abs/1704.01665. # # Hamilton, W. L. (2020) Graph Representation Learning. Morgan & Claypool, forthcoming. # # Bronstein, M. M. et al. (2016) ‘Geometric deep learning: going beyond Euclidean data’, arXiv [cs.CV]. Available at: http://arxiv.org/abs/1611.08097. # # Delaney, J. S. (2004) ‘ESOL: estimating aqueous solubility directly from molecular structure’, Journal of chemical information and computer sciences, 44(3), pp. 1000–1005. # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Day 1. Chapter 1. Basics # # ## Lesson 8. Help and documentation in R # # R has several built-in ways of getting help: # # * help(topic) - documentation on the topic # * help.search("topic") - search the help system # * apropos("topic") - returns the names of all objects on the search path that match "topic" # * help.start() - starts the HTML version of the help # * str(a) - shows the internal *str*ucture of an R object # * summary(a) - gives a summary; for numeric objects it is a statistical one, for other objects there are other versions # * ls() - shows the objects on the search path # * ls.str() - runs `str()` for every object on the search path # * dir() - shows the files in the current directory # * methods(a) - shows the S3 methods for a # * methods(class=class(a)) - lists all methods that can be used with an object of class a # # # ## Calling up help # Get help on vectors help(vector) # Get help on matrices help(matrix) # Get help on lists help(list) # ## Searching the documentation # # *Tip: also try this in RStudio* # Using help.search() (in an R notebook a new search window is opened) help.search('vector') # ... or with the ?? operator ??vector # ## Getting a statistical summary v1 <- c(100,200,300,400,500,600,700,900) summary(v1) v2 <- 1:100 summary(v2) # # Congratulations! You have finished Lesson 8.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch from xautodl import spaces from xautodl.xlayers import super_core def _create_stel(input_dim, output_dim, order): return super_core.SuperSequential( super_core.SuperLinear(input_dim, output_dim), super_core.SuperTransformerEncoderLayer( output_dim, num_heads=spaces.Categorical(2, 4, 6), mlp_hidden_multiplier=spaces.Categorical(1, 2, 4), order=order, ), ) # - batch, seq_dim, input_dim = 1, 4, 6 order = super_core.LayerOrder.PreNorm out1_dim = spaces.Categorical(12, 24, 36) out2_dim = spaces.Categorical(24, 36, 48) out3_dim = spaces.Categorical(36, 72, 100) layer1 = _create_stel(input_dim, out1_dim, order) layer2 = _create_stel(out1_dim, out2_dim, order) layer3 = _create_stel(out2_dim, out3_dim, order) model = super_core.SuperSequential(layer1, layer2, layer3) print(model) inputs = torch.rand(batch, seq_dim, input_dim) outputs = model(inputs) abstract_space = model.abstract_search_space abstract_space.clean_last() abstract_child = abstract_space.random(reuse_last=True) # print("The abstract child program is:\n{:}".format(abstract_child)) model.enable_candidate() model.set_super_run_type(super_core.SuperRunMode.Candidate) model.apply_candidate(abstract_child) outputs = model(inputs) print(outputs.shape) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: transfomers # language: python # name: transformers # --- # + import os from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient from IPython.display import Markdown, display from requests import post, put def printmd(string): display(Markdown(string)) # - # # Test our text on our local running API # + API_KEY = "[YourSecretKeyCanBeAnything]" URL_LOCAL = f"http://0.0.0.0:5000/api/extraction" WEB_APP_URL = "https://[your_web_app].azurewebsites.net" # This is the deployed web app format: URL https://[appname].azurewebsites.net def summarize(text): try: headers = { # Request headers "Content-Type": "application/json", "Ocp-Apim-Subscription-Key": API_KEY, } body = { "values": [ { "recordId": "0", "data": { "text": text } } ] } url = WEB_APP_URL resp = post(url=url, json=body, headers=headers) result_response = resp.json() return result_response except Exception as e: print('Exception', e) return result_response # Download test data directory = r"../data/" for filename in os.listdir(directory): with open(os.path.join(directory, filename)) as txt: text_to_summarise = txt.read() printmd(f"***Full Text***") print(text_to_summarise.replace('&', '&')) result_response = summarize(text_to_summarise.replace('"', '').replace('&', '&')) printmd(f"**Summarized Text**") print(''.join(result_response['values'][0]['data']['summary'])) # - # # Deploy the PowerSkill to Azure Search # ## Create the data source # + # Let's create a data source API_KEY = "" # Your ACS API Key ACS_URL = "https://[your_search_instance].search.windows.net" # Your ACS URL format https://[your ACS instance].search.windows.net DATA_SOURCE = "text-summarization-datasource" # The name for your data source CONTAINER_NAME = "docs" KEY= "[YourSecretKeyCanBeAnything]" # Set the KEY value you deployed your Web App with [YourSecretKeyCanBeAnything] WEB_APP_URL = 
"https://[your_web_app].azurewebsites.net" # This is the deployed web app format: URL https://[appname].azurewebsites.net json_text = { "name" : DATA_SOURCE, "type" : "azureblob", "credentials" : { "connectionString" : "DefaultEndpointsProtocol=https;AccountName=noledge;AccountKey=;EndpointSuffix=core.windows.net"}, # This is your azure blob connection string "container" : { "name" : CONTAINER_NAME } # The name of the container where the data files are } headers = { "api-key": API_KEY, "Content-Type": "application/json", } try: url = f"{ACS_URL}/datasources?api-version=2020-06-30" resp = post(url=url, json=json_text, headers=headers) result_response = resp.json() if resp.status_code == 403: print("Authorisation Failed: Check that your API KEY value is correct") if resp.status_code == 201: print("Success creating data source") except Exception as e: print('Exception creating data source', e) # - # ## Now we create the index # + INDEX_NAME = "text-summarization-index" # The name for the index json_text = { "name" : INDEX_NAME, "fields": [ { "name": "id", "type": "Edm.String", "key": True, "searchable": False }, { "name": "file_name", "type": "Edm.String", "searchable": False }, { "name": "size", "type": "Edm.Int64", "searchable": False }, { "name": "last_modified", "type": "Edm.DateTimeOffset", "searchable": False }, { "name": "content", "type": "Edm.String", "searchable": True, "filterable": False, "sortable": False, "facetable": False }, { "name": "summary", "type": "Edm.String", "searchable": True, "filterable": False, "sortable": False, "facetable": False }, { "analyzer": "standard.lucene", "facetable": False, "filterable": False, "indexAnalyzer": None, "key": False, "name": "organization", "retrievable": True, "searchAnalyzer": None, "searchable": True, "sortable": False, "synonymMaps": [], "type": "Collection(Edm.String)" }, { "analyzer": "standard.lucene", "facetable": False, "filterable": False, "indexAnalyzer": None, "key": False, "name": "person", "retrievable": True, "searchAnalyzer": None, "searchable": True, "sortable": False, "synonymMaps": [], "type": "Collection(Edm.String)" }, { "analyzer": "standard.lucene", "facetable": False, "filterable": False, "indexAnalyzer": None, "key": False, "name": "location", "retrievable": True, "searchAnalyzer": None, "searchable": True, "sortable": False, "synonymMaps": [], "type": "Collection(Edm.String)" }, { "analyzer": "standard.lucene", "facetable": False, "filterable": False, "indexAnalyzer": None, "key": False, "name": "url", "retrievable": True, "searchAnalyzer": None, "searchable": True, "sortable": False, "synonymMaps": [], "type": "Collection(Edm.String)" } ] } try: url = f"{ACS_URL}/indexes?api-version=2020-06-30" resp = post(url=url, json=json_text, headers=headers) result_response = resp.json() if resp.status_code == 403: print("Authorisation Failed: Check that your API KEY value is correct") if resp.status_code == 400: print(f"Error", resp.text) if resp.status_code == 201: print("Success creating index") except Exception as e: print('Exception creating index', e) # - # ## Now we create the skill set # + # Note we are passing in the secret header key and the inference API URL to the skillset SKILLSET_NAME = "text-summarization-skillset" # The name of your skillset COGSVC_KEY = "" # This is your Cognitive Services key that resides in the same region as ACS (used to compare custom vision captions and object detection) json_text = { "description": "Crack documents.", "skills": [ { "@odata.type": 
"#Microsoft.Skills.Text.EntityRecognitionSkill", "categories": [ "Organization", "Location", "Person", "Url" ], "defaultLanguageCode": "en", "inputs": [ { "name": "text", "source": "/document/content" } ], "outputs": [ { "name": "organizations", "targetName": "organizations" }, { "name": "persons", "targetName": "persons" }, { "name": "locations", "targetName": "locations" }, { "name": "urls", "targetName": "urls" } ] }, { "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill", "description": "A custom skill that summarizes the text", "uri": f"{WEB_APP_URL}/api/extraction", "timeout": "PT160S", "batchSize": 1, "context": "/document/content", "httpHeaders": { "Ocp-Apim-Subscription-Key": KEY }, "httpMethod": "POST", "inputs": [ { "name": "text", "source": "/document/content" } ], "outputs": [ { "name": "summary" } ] } ], "cognitiveServices": { "@odata.type": "#Microsoft.Azure.Search.CognitiveServicesByKey", "description": "cogsvc", "key": COGSVC_KEY } } try: url = f"{ACS_URL}/skillsets/{SKILLSET_NAME}?api-version=2020-06-30" resp = put(url=url, json=json_text, headers=headers) result_response = resp.json() if resp.status_code == 403: print("Authorisation Failed: Check that your API KEY value is correct") if resp.status_code == 400: print(f"Error", resp.text) if resp.status_code == 201: print("Success creating skillset") except Exception as e: print('Exception creating skillset', e) # - # ## Now we create the indexer # + json_text = { "name": "text-summarization-indexer", "dataSourceName": DATA_SOURCE, "targetIndexName": INDEX_NAME, "skillsetName": SKILLSET_NAME, "parameters": { "configuration": { "allowSkillsetToReadFileData": True } }, "outputFieldMappings": [ { "sourceFieldName" : "/document/organizations", "targetFieldName" : "organization" }, { "sourceFieldName" : "/document/persons", "targetFieldName" : "person" }, { "sourceFieldName" : "/document/locations", "targetFieldName" : "location" }, { "sourceFieldName" : "/document/urls", "targetFieldName" : "url" }, { "sourceFieldName": "/document/content/summary", "targetFieldName": "summary" } ] } try: url = f"{ACS_URL}/indexers?api-version=2020-06-30" resp = post(url=url, json=json_text, headers=headers) result_response = resp.json() if resp.status_code == 403: print("Authorisation Failed: Check that your API KEY value is correct") if resp.status_code == 400: print(f"Error", resp.text) if resp.status_code == 201: print("Success creating indexer") except Exception as e: print('Exception creating indexer', e) # - # ## Let's go and test the ACS index # + searchterm = "" # Create a client credential = AzureKeyCredential(API_KEY) client = SearchClient(endpoint=ACS_URL, index_name=INDEX_NAME, credential=credential) results = client.search(search_text=searchterm, top=5) for i, result in enumerate(results): print(f"Document {i}") printmd(f"***Locations***") print(result['location']) printmd(f"***Organizations***") print(result['organization']) printmd(f"***Persons***") print(result['person']) printmd(f"**Summary**") print(result['summary']) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Word Embeddings # * NLP trabaja con diferentes formas de data discreta. 
# * Words # * Words come from a finite set (aka the vocabulary) # * Other kinds of discrete data: # * characters # * POS tags # * named entities # * items in a product catalogue # * Representing discrete types as vectors has been part of the success of NLP. # * When the discrete types are words, the vector representation is known as a **word embedding** # * Count based -> TF-IDF # * Here we are interested in learning- or prediction-based methods: # * learning-based embeddings # * prediction-based embeddings # * The representations are learnt by maximising an objective for a specific learning task # ## Why learn embeddings? # 1. Reducing the dimensionality makes them computationally more efficient. # 2. Count-based representations result in high-dimensional vectors. # * they encode similar information redundantly # * they do not share statistical information # 3. High-dimensional inputs create problems for optimisation. # * Deep learning models have millions of parameters # 4. Representations learnt (or fine-tuned) on data specific to a task are optimal for that task. # * With heuristic approaches, it is not clear whether the features are relevant to the task. # ## Efficiency of embeddings # # * A one-hot vector multiplying a linear layer. # ![Efficiency of embeddings](../assets/one_hot_linearl.png) # * Since the one-hot vector is all zeros with a single 1, the position of the 1 acts as a selector in the matrix multiplication. # * This works, but it is computationally expensive and inefficient. # * The one-hot vector multiplies every number in the linear layer's weight matrix and computes the sum for each row. # * Based on this, we could skip the multiplication and instead use an integer index to pull out the selected row (see the small sketch at the end of this section). # ## Approaches for learning word embeddings # * The goal is to understand: # * what word embeddings are # * how and where they are applicable # * how to use them safely in a model # * their limitations # # * In practice, you rarely find yourself in a situation where you need to develop an algorithm for training word embeddings from scratch. # * All word embedding methods are trained on words alone (unlabelled data) in a supervised way. # * This is possible by creating supervised tasks in which the data is implicitly labelled. # * The representation is optimised to solve the task we create, capturing statistical and linguistic properties of the corpus. # ### Auxiliary tasks # * Given a sequence of words, predict the next word. _language modelling_ # * Given a sequence of words before and after, predict the missing word. # * Given a word, predict the words that occur within a window, independent of position. # **Examples:** # * GloVe # * Continuous Bag-of-Words (CBOW) # * Skipgrams # ## Practical uses of pre-trained word embeddings # * Large corpora - such as all of Google News, Wikipedia, or [Common Crawl](commoncrawl.org) # * Freely available to download and use.
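# To make the efficiency point above concrete, the toy check below (not part of the original notes) confirms that multiplying a one-hot vector by a weight matrix just selects a row, which is exactly what an `nn.Embedding` lookup does directly:
# +
import torch
import torch.nn as nn

vocab_size, emb_dim = 5, 3
embedding = nn.Embedding(vocab_size, emb_dim)

token_index = torch.tensor([2])
one_hot = torch.zeros(1, vocab_size)
one_hot[0, 2] = 1.0

via_matmul = one_hot @ embedding.weight          # [1, emb_dim], full matrix multiply
via_lookup = embedding(token_index)              # [1, emb_dim], just indexes row 2
print(torch.allclose(via_matmul, via_lookup))    # True
# -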
# ### Loading embeddings # # * They normally come in the following format: # * each line starts with the word being _embedded_ # * followed by a sequence of numbers (its vector representation) # * the length of this sequence is the dimension of the representation (aka the _embedding dimension_) # * the _embedding dimension_ is normally in the hundreds # * the number of tokens is usually the vocabulary size, in the millions. import torch import torch.nn as nn from tqdm import tqdm from annoy import AnnoyIndex import numpy as np class PreTrainedEmbeddings(object): """ A wrapper around pre-trained word vectors and their use """ def __init__(self, word_to_index, word_vectors): """ Args: word_to_index (dict): mapping from word to integers word_vectors (list of numpy arrays) """ self.word_to_index = word_to_index self.word_vectors = word_vectors self.index_to_word = {v: k for k, v in self.word_to_index.items()} self.index = AnnoyIndex(len(word_vectors[0]), metric='euclidean') print("Building Index") for _, i in self.word_to_index.items(): self.index.add_item(i, self.word_vectors[i]) self.index.build(50) print("Finished") @classmethod def from_embeddings_file(cls, embedding_file): """Instantiate from pre-trained vector file. Vector file should be of the format: word0 x0_0 x0_1 x0_2 x0_3 ... x0_N word1 x1_0 x1_1 x1_2 x1_3 ... x1_N Args: embedding_file (str): location of the file Returns: instance of PreTrainedEmbeddings """ word_to_index = {} word_vectors = [] with open(embedding_file) as fp: for line in fp.readlines(): line = line.split(" ") word = line[0] vec = np.array([float(x) for x in line[1:]]) word_to_index[word] = len(word_to_index) word_vectors.append(vec) return cls(word_to_index, word_vectors) def get_embedding(self, word): """ Args: word (str) Returns an embedding (numpy.ndarray) """ return self.word_vectors[self.word_to_index[word]] def get_closest_to_vector(self, vector, n=1): """Given a vector, return its n nearest neighbors Args: vector (np.ndarray): should match the size of the vectors in the Annoy index n (int): the number of neighbors to return Returns: [str, str, ...]: words that are nearest to the given vector. The words are not ordered by distance """ nn_indices = self.index.get_nns_by_vector(vector, n) return [self.index_to_word[neighbor] for neighbor in nn_indices] def compute_and_print_analogy(self, word1, word2, word3): """Prints the solutions to analogies using word embeddings Analogies are word1 is to word2 as word3 is to __ This method will print: word1 : word2 :: word3 : word4 Args: word1 (str) word2 (str) word3 (str) """ vec1 = self.get_embedding(word1) vec2 = self.get_embedding(word2) vec3 = self.get_embedding(word3) # compute the fourth word's embedding spatial_relationship = vec2 - vec1 vec4 = vec3 + spatial_relationship closest_words = self.get_closest_to_vector(vec4, n=4) existing_words = set([word1, word2, word3]) closest_words = [word for word in closest_words if word not in existing_words] if len(closest_words) == 0: print("Could not find nearest neighbors for the computed vector!") return for word4 in closest_words: print("{} : {} :: {} : {}".format(word1, word2, word3, word4)) embeddings = PreTrainedEmbeddings.from_embeddings_file('data/glove/glove.6B.100d.txt') # ## Relationships between word embeddings # * Their main feature is that they encode semantic and syntactic relationships that manifest as regularities in how words are used. # * For example, we talk about dogs and cats in similar ways.
# * As a consequence, their embeddings are much closer to each other than to the embeddings of other animals, such as elephants. # * We can explore the semantic relationships encoded in the embeddings in several ways. # # **Analogy task** # * Word1 : Word2 :: Word3 : ______ # 1. Word2 - Word1 # * This difference vector encodes the relationship between Word1 and Word2 # 2. Add that difference to Word3 # * This produces a vector close to Word4, the word in the blank # 3. Looking up the nearest neighbour of the resulting vector in the index solves the analogy problem. embeddings.compute_and_print_analogy('man', 'he', 'woman') embeddings.compute_and_print_analogy('fly', 'plane', 'sail') embeddings.compute_and_print_analogy('cat', 'kitten', 'dog') embeddings.compute_and_print_analogy('blue', 'color', 'dog') embeddings.compute_and_print_analogy('leg', 'legs', 'hand') embeddings.compute_and_print_analogy('toe', 'foot', 'finger') embeddings.compute_and_print_analogy('talk', 'communicate', 'read') embeddings.compute_and_print_analogy('blue', 'democrat', 'red') # ## One of the most common analogies encodes gender roles # # # **Distinguishing between regularities in language and cultural biases is difficult.** # # See: # [Ethics in NLP](ethicsinnlp.org) embeddings.compute_and_print_analogy('man', 'king', 'woman') embeddings.compute_and_print_analogy('man', 'doctor', 'woman') # ## They are not always correct embeddings.compute_and_print_analogy('fast', 'fastest', 'small') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Model Summary # - # After the hyperparameter search on Microsoft Azure, this notebook reports the results. # Import libraries import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn import metrics from sklearn.metrics import f1_score from sklearn.metrics import precision_score from sklearn.model_selection import train_test_split sns.set_style('white') df = pd.read_csv('../data/processed/final_down.csv') df.drop(labels = 'Unnamed: 0', axis = 1, inplace = True) X = df.iloc[:,:-1] y = df['Satisfaction'] # Train-test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, stratify=y, random_state=42) # + # Useful function import pickle from sklearn import tree def metric(classifier, X_train, y_train, X_test, y_test, save_fig = True, name = None, save_mod = True, save_tree = False): ''' classifier: model already initialized save_tree = False, If True, then save and export tree save_fig = True, Save fig? name = None, Name of the model save_mod = True, Dump model? Return false positive rate, true positive rate, AUC.
''' classifier.fit(X_train, y_train) if save_mod: pickle.dump(classifier, open('../models/'+name, 'wb')) else: pass if save_tree: dotfile = open("../reports/figures/tree/tree.dot", 'w') dotfile = tree.export_graphviz(classifier, out_file = dotfile, feature_names = X.columns) else: pass y_pred = classifier.predict(X_test) y_pred_train = classifier.predict(X_train) print("f1=====\ntrain: %.3f\ntest: %.3f\n=====" % (f1_score(y_train, y_pred_train), f1_score(y_test, y_pred) )) print("\nPrecision=====\ntrain: %.3f\ntest: %.3f\n=====" % (precision_score(y_train, y_pred_train), precision_score(y_test, y_pred) )) print("") sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt = '2d', cmap='Reds') plt.title('Confusion matrix - ' + name) if save_fig: plt.savefig('../reports/figures/models/' + name + '.png') else: pass plt.show() print(classification_report(y_test, y_pred)) fpr, tpr, auc = roc(classifier, X_test, y_test, y_pred, name) return (fpr, tpr, auc, classification_report(y_test, y_pred)) def roc(model, X_test, y_test, y_pred, name, save_fig = True): y_probs = model.predict_proba(X_test) fpr, tpr, thresholds1=metrics.roc_curve(y_test, y_probs[:,1]) import matplotlib.pyplot as plt plt.plot(fpr, tpr, label='ROC') plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC - ' + name) plt.legend() if save_fig: plt.savefig('../reports/figures/models/' + name + 'AUC.png') else: pass plt.show() auc = metrics.roc_auc_score(y_test, y_probs[:,1]) print('AUC: %.2f' % auc) return (fpr, tpr, auc) # + [markdown] tags=[] # # KNN model # - from sklearn.neighbors import KNeighborsClassifier model_knn = KNeighborsClassifier(n_neighbors=13) fpr1, tpr1, auc1, report1 = metric(model_knn,X_train,y_train,X_test,y_test, name = 'KNN') fpr1 tpr1 auc1 print(report1) # + [markdown] tags=[] # # Tree classification # - from sklearn.tree import DecisionTreeClassifier model_tree = DecisionTreeClassifier(criterion='gini', max_depth=11, min_samples_leaf=7, min_samples_split=9) fpr2, tpr2, auc2, report2 = metric(model_tree,X_train,y_train,X_test,y_test, name = 'TreeClassifier', save_tree=True) # + [markdown] tags=[] # # Naive bayes # + from sklearn.naive_bayes import GaussianNB from sklearn.metrics import f1_score model = GaussianNB() model.fit(X_train, y_train) y_pred=model.predict(X_test) pickle.dump(model, open('../models/NaiveBayes', 'wb')) print("f1_score: ", f1_score(y_test, y_pred)) print("f1_test: ", f1_score(y_test, y_pred)) sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt = '2d', cmap='Reds') plt.title('Confusion matrix - Naive Bayes') plt.savefig('../reports/figures/NaiveBayes.png') plt.show() report3 = classification_report(y_test, y_pred) print(classification_report(y_test, y_pred)) # - fpr3,tpr3, auc3=roc(model, X_test, y_test, y_pred, 'Naive Bayes') # + [markdown] tags=[] # # Logistic Regression # - from sklearn.linear_model import LogisticRegression model = LogisticRegression(C=10, max_iter=1000) fpr4, tpr4, auc4, report4=metric(model,X_train,y_train,X_test,y_test, name = 'Logistic Regression') # + sns.set_style('white') coeff=pd.DataFrame() coeff["feature"]=X_train.columns coeff["w"]=model.coef_[0] coeff.sort_values(by=['w'], inplace=True) fig = plt.figure(figsize=(10,10)) sns.barplot(data=coeff, y="feature", x="w", palette="Blues_d", orient="h") sns.set(rc={'figure.figsize':(6,4)}) plt.savefig('../reports/figures/logistic_coef.png', bbox_inches = 'tight', dpi = 200) plt.show() # + [markdown] tags=[] # # SVM # 
- from sklearn.svm import SVC sns.set_style('white') model_svc = SVC(C=6, kernel='rbf',probability=True) fpr5, tpr5, auc5, report5 = metric(model_svc,X_train, y_train, X_test, y_test, name = 'SVM') # + [markdown] tags=[] # # Multi-layer perceptron # - from sklearn.neural_network import MLPClassifier model_MLP=MLPClassifier(hidden_layer_sizes=(180, 40, 10), alpha=0.7, max_iter=2000) fpr6, tpr6, auc6, report6=metric(model_MLP,X_train,y_train,X_test,y_test, name ='Multi Layer Perceptron') # # ROC, AUC and Report comparison reports = [report1, report2, report3, report4, report5, report6] models = ["KNN", "Tree", "NB", "Logistic", "SVM", "NeuralNet"] fig = plt.figure(1, figsize=(8,5)) plt.style.use('default') plt.plot(fpr1, tpr1, label= "KNN") plt.plot(fpr2, tpr2, label= "Tree") plt.plot(fpr3, tpr3, label= "NB") plt.plot(fpr4, tpr4, label= "Logistic") plt.plot(fpr5, tpr5, label= "SVM") plt.plot(fpr6, tpr6, label= "NeuralNet") plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic Curve', weight='bold', size=13) plt.legend() plt.savefig('../reports/figures/ROCcurves.png', dpi = 200) plt.show() AUCs = {"Model": ["KNN", "Tree", "NB", "Logistic", "SVM", "NeuralNet"], "AUC":[auc1, auc2, auc3, auc4, auc5, auc6]} AUCs = pd.DataFrame(AUCs) AUCs.set_index(keys = AUCs['Model'], inplace=True) AUCs.sort_values(by='AUC', ascending=False, inplace=True) fig = plt.figure(1, figsize=(8,5)) sns.barplot(x = 'Model', y='AUC', data = AUCs, palette='Blues_r_d'); plt.xlabel('') plt.title('AUC Comparison', weight = 'bold', fontsize = 13) plt.savefig('../reports/figures/AUCs.png', dpi = 200) for report, model in zip(reports, models): print("==========\n" + model + "\n" + report + "\n==========\n") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import webbrowser new = 2 # open in a new tab, if possible #// open an HTML file on my own(Windows) computer url = "file://index.html" webbrowser.open(url, new=new) # + url = 'http://docs.python.org/' # Open URL in a new tab, if a browser window is already open. webbrowser.open_new_tab(url) # - from IPython.display import IFrame docs = IFrame('index.html', width=1000, height=1000) docs # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Creating city models and objects # # In this tutorial we explore how to create new city models with using `cjio`'s # API. # + pycharm={"is_executing": false, "name": "#%%\n"} from pathlib import Path from cjio import cityjson from cjio.models import CityObject, Geometry # + [markdown] pycharm={"name": "#%% md\n"} # Set up paths for the tutorial. # # + pycharm={"is_executing": false, "name": "#%%\n"} package_dir = Path(__name__).resolve().parent.parent.parent schema_dir = package_dir / 'cjio' / 'schemas'/ '1.0.0' data_dir = package_dir / 'tests' / 'data' # + [markdown] pycharm={"name": "#%% md\n"} # ## Creating a single CityObject # # We are building a single CityObject of type *Building*. This building has an # LoD2 geometry, thus it has Semantic Surfaces. 
The geometric shape of the # building is a simple cube (size 10x10x10), which is sufficient for this # demonstration. # # The idea is that we create empty containers for the CityModel, CityObjects and # Geometries, then fill those up and add to the CityModel. # + [markdown] pycharm={"name": "#%% md\n"} # We create an empty CityModel # + pycharm={"is_executing": false, "name": "#%%\n"} cm = cityjson.CityJSON() print(cm) # + [markdown] pycharm={"name": "#%% md\n"} # An empty CityObject. Note that the ID is required. # + pycharm={"is_executing": false, "name": "#%%\n"} co = CityObject( id='1' ) # + [markdown] pycharm={"name": "#%% md\n"} # We can also add attributes # + pycharm={"is_executing": false, "name": "#%%\n"} co_attrs = { 'some_attribute': 42, 'other_attribute': 'bla bla' } co.attributes = co_attrs # + [markdown] pycharm={"name": "#%% md\n"} # Let's see what do we have # + pycharm={"is_executing": false, "name": "#%%\n"} print(co) # + [markdown] pycharm={"name": "#%% md\n"} # Instantiate a Geometry without boundaries and semantics # + pycharm={"is_executing": false, "name": "#%%\n"} geom = Geometry(type='Solid', lod=2) # + [markdown] pycharm={"name": "#%% md\n"} # We build the boundary Solid of the cube # The surfaces are in this order: WallSurface, WallSurface, WallSurface, WallSurface, GroundSurface, RoofSurface # + pycharm={"is_executing": false, "name": "#%%\n"} bdry = [ [[(0.0, 0.0, 0.0), (10.0, 0.0, 0.0), (10.0, 0.0, 10.0), (0.0, 0.0, 10.0)]], [[(10.0, 0.0, 0.0), (10.0, 10.0, 0.0), (10.0, 10.0, 10.0), (10.0, 0.0, 10.0)]], [[(10.0, 10.0, 0.0), (0.0, 10.0, 0.0), (0.0, 10.0, 10.0), (10.0, 10.0, 10.0)]], [[(0.0, 10.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 10.0), (0.0, 10.0, 10.0)]], [[(0.0, 0.0, 0.0), (0.0, 10.0, 0.0), (10.0, 10.0, 0.0), (10.0, 0.0, 0.0)]], [[(10.0, 0.0, 10.0), (10.0, 10.0, 10.0), (0.0, 10.0, 10.0), (0.0, 0.0, 10.0)]] ] # + [markdown] pycharm={"name": "#%% md\n"} # Add the boundary to the Geometry # + pycharm={"is_executing": false, "name": "#%%\n"} geom.boundaries.append(bdry) # + [markdown] pycharm={"name": "#%% md\n"} # We build the SemanticSurfaces for the boundary. The `surfaces` attribute must # contain at least the `surface_idx` and `type` keys, optionally `attributes`. # We have three semantic surface types, WallSurface, GroundSurface, RoofSurface. # + pycharm={"is_executing": false, "name": "#%%\n"} srf = { 0: {'surface_idx': [], 'type': 'WallSurface'}, 1: {'surface_idx': [], 'type': 'GroundSurface'}, 2: {'surface_idx': [], 'type': 'RoofSurface'} } # + [markdown] pycharm={"name": "#%% md\n"} # We use the `surface_idx` to point to the surfaces of the boundary. Thus the # index to a single boundary surface is composed as [Solid index, Shell index, Surface index]. # Consequently, in case of a CompositeSolid which first Solid, outer Shell, # second Surface is a WallSurface, one element in the `surface_idx` would be # `[0, 0, 1]`. Then assuming that there is only a single WallSurface in the # mentioned CompositeSolid, the index to the WallSurfaces is composed as # `{'surface_idx': [ [0, 0, 1] ], 'type': 'WallSurface'}`. # In case of a Solid boundary type the *Solid index* is omitted from the elements # of `surface_idx`. In case of a MultiSurface boundary type both the *Solid index* # and *Shell index* are omitted from the elements of `surface_idx`. # # We create the surface index accordingly and assign them to the geometry. 
# + pycharm={"is_executing": false, "name": "#%%\n"} geom.surfaces[0] = {'surface_idx': [[0,0], [0,1], [0,2], [0,3]], 'type': 'WallSurface'} geom.surfaces[1] = {'surface_idx': [[0,4]], 'type': 'GroundSurface'} geom.surfaces[2] = {'surface_idx': [[0,5]], 'type': 'RoofSurface'} # + [markdown] pycharm={"name": "#%% md\n"} # Then we test if it works. # + pycharm={"is_executing": false, "name": "#%%\n"} ground = geom.get_surfaces('groundsurface') ground_boundaries = [] for g in ground.values(): ground_boundaries.append(geom.get_surface_boundaries(g)) # + [markdown] pycharm={"name": "#%% md\n"} # We have a list of generators # + pycharm={"is_executing": false, "name": "#%%\n"} res = list(ground_boundaries[0]) # + [markdown] pycharm={"name": "#%% md\n"} # The generator creates a list of surfaces --> a MultiSurface # + pycharm={"is_executing": false, "name": "#%%\n"} assert res[0] == bdry[4] # %% wall = geom.get_surfaces('wallsurface') wall_boundaries = [] for w in wall.values(): wall_boundaries.append(geom.get_surface_boundaries(w)) # + [markdown] pycharm={"name": "#%% md\n"} # We put everything together, first filling up the CityObject # + pycharm={"is_executing": false, "name": "#%%\n"} co.geometry.append(geom) co.type = 'Building' # + [markdown] pycharm={"name": "#%% md\n"} # Then adding the CityObject to the CityModel. # + pycharm={"is_executing": false, "name": "#%%\n"} cm.cityobjects[co.id] = co # + [markdown] pycharm={"name": "#%% md\n"} # Let's validate the citymodel before writing it to a file. However, first we # need to index the geometry boundaries and create the vertex list, second we # need to add the cityobject and vertices to the internal json-store of the # citymodel so the `validate()` method can validate them. # # Note: CityJSON version 1.0.0 only accepts the Geometry `lod` as a numeric # value and not a string. # + pycharm={"is_executing": false, "name": "#%%\n"} cityobjects, vertex_lookup = cm.reference_geometry() cm.add_to_j(cityobjects,vertex_lookup) cm.update_bbox() #cm.validate(folder_schemas=schema_dir) # - cm # + [markdown] pycharm={"name": "#%% md\n"} # Finally, we write the citymodel to a CityJSON file. # + pycharm={"is_executing": false, "name": "#%%\n"} outfile = data_dir / 'test_create.json' cityjson.save(cm, outfile) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 函数 # # - 函数可以用来定义可重复代码,组织和简化 # - 一般来说一个函数在实际开发中为一个小功能 # - 一个类为一个大功能 # - 同样函数的长度不要超过一屏 # Python中的所有函数实际上都是有返回值(return None), # # 如果你没有设置return,那么Python将不显示None. # # 如果你设置return,那么将返回出return这个值. def HJN(): print('Hello') return 1000 b=HJN() print(b) HJN def panduan(number): if number % 2 == 0: print('O') else: print('J') panduan(number=1) panduan(2) # ## 定义一个函数 # # def function_name(list of parameters): # # do something # ![](../Photo/69.png) # - 以前使用的random 或者range 或者print.. 其实都是函数或者类 # 函数的参数如果有默认值的情况,当你调用该函数的时候: # 可以不给予参数值,那么就会走该参数的默认值 # 否则的话,就走你给予的参数值. 
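# A minimal example of the default-value rule described above (the function and
# parameter names here are illustrative):

def greet(name, greeting='Hello'):
    # 'greeting' falls back to its default when the caller omits it
    return greeting + ', ' + name

print(greet('HJN'))                 # no value given -> uses the default: Hello, HJN
print(greet('HJN', greeting='Hi'))  # value given -> uses the caller's value: Hi, HJN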
import random def hahah(): n = random.randint(0,5) while 1: N = eval(input('>>')) if n == N: print('smart') break elif n < N: print('太小了') elif n > N: print('太大了') # ## 调用一个函数 # - functionName() # - "()" 就代表调用 def H(): print('hahaha') def B(): H() B() def A(f): f() A(B) # ![](../Photo/70.png) # ## 带返回值和不带返回值的函数 # - return 返回的内容 # - return 返回多个值 # - 一般情况下,在多个函数协同完成一个功能的时候,那么将会有返回值 # ![](../Photo/71.png) # # - 当然也可以自定义返回None # ## EP: # ![](../Photo/72.png) def main(): print(min(min(5,6),(51,6))) def min(n1,n2): a = n1 if n2 < a: a = n2 main() # ## 类型和关键字参数 # - 普通参数 # - 多个参数 # - 默认值参数 # - 不定长参数 # ## 普通参数 # ## 多个参数 # ## 默认值参数 # ## 强制命名 def U(str_): xiaoxie = 0 for i in str_: ASCII = ord(i) if 97<=ASCII<=122: xiaoxie +=1 elif xxxx: daxie += 1 elif xxxx: shuzi += 1 return xiaoxie,daxie,shuzi U('HJi12') # ## 不定长参数 # - \*args # > - 不定长,来多少装多少,不装也是可以的 # - 返回的数据类型是元组 # - args 名字是可以修改的,只是我们约定俗成的是args # - \**kwargs # > - 返回的字典 # - 输入的一定要是表达式(键值对) # - name,\*args,name2,\**kwargs 使用参数名 def TT(a,b) def TT(*args,**kwargs): print(kwargs) print(args) TT(1,2,3,4,6,a=100,b=1000) {'key':'value'} TT(1,2,4,5,7,8,9,) def B(name1,nam3): pass B(name1=100,2) # + def sum_(*args,A='sum'): res = 0 count = 0 for i in args: res +=i count += 1 if A == "sum": return res elif A == "mean": mean = res / count return res,mean else: print(A,'还未开放') # - sum_(-1,0,1,4,A='var') 'aHbK134'.__iter__ b = 'asdkjfh' for i in b : print(i) 2,5 2 + 22 + 222 + 2222 + 22222 # ## 变量的作用域 # - 局部变量 local # - 全局变量 global # - globals 函数返回一个全局变量的字典,包括所有导入的变量 # - locals() 函数会以字典类型返回当前位置的全部局部变量。 a = 1000 b = 10 def Y(): global a,b a += 100 print(a) Y() def YY(a1): a1 += 100 print(a1) YY(a) print(a) # ## 注意: # - global :在进行赋值操作的时候需要声明 # - 官方解释:This is because when you make an assignment to a variable in a scope, that variable becomes local to that scope and shadows any similarly named variable in the outer scope. 
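# A short demonstration of the shadowing rule quoted above (the variable names are
# illustrative):

count = 10

def read_only():
    # reading the global value needs no declaration
    return count

def increment():
    # assignment requires the global declaration; otherwise Python treats 'count'
    # as a new local name and 'count += 1' raises UnboundLocalError
    global count
    count += 1
    return count

print(read_only())   # 10
print(increment())   # 11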
# - ![](../Photo/73.png) # # Homework # - 1 # ![](../Photo/74.png) def getPentagonalNumber(): n=1 count=0 for n in range(1,101): geshu=(n*(3*n-1))/2 n +=1 print(geshu,end=" ") count +=1 if count % 10 == 0: print() getPentagonalNumber() # - 2 # ![](../Photo/75.png) def a(): b=float(input()) ge=b%10 shi=((b//10)%10) bai=b//100 he=ge+shi+bai print(he) a() # - 3 # ![](../Photo/76.png) def a(): nums=[] for i in range(3): num=float(input('请输入第{}个数:'.format(i+1))) nums.append(num) print(sorted(nums)) a() # - 4 # ![](../Photo/77.png) def a(): touzi,lilv=map(float,input().split(",")) nian=0 print('year Fture Value') for i in range(30): nian +=1 touzi=touzi+(touzi*(lilv/100)) print('{} {}'.format(nian,touzi)) a() # - 5 # ![](../Photo/78.png) def a(): num=0 for i in range(97,123,1): if i>=108: print(chr(i),end=(" ")) num +=1 if num%10==0: print() a() # - 6 # ![](../Photo/79.png) def a(): for i in range(2010,2021,1): if ((i%4==0 and i%100!=0 )or(i%400==0)): print('{} 有366天'.format(i)) else: print('{} 有365天'.format(i)) a() # - 7 # ![](../Photo/80.png) def a(): a1,b1,a2,b2=map(float,input().split(",")) if (a2>a1) and (b2>b1): c=((a2-a1)**2+(b2-b1)**2)**0.5 print(c) elif (a2>a1) and (b1>b2): c=((a2-a1)**2+(b1-b2)**2)**0.5 print(c) elif (a1>a2) and (b2>b1): c=((a1-a2)**2+(b2-b1)**2)**0.5 print(c) elif (a1>a2) and (b1>b2): c=((a1-a2)**2+(b1-b2)**2)**0.5 print(c) elif (a1==a2) and (b1>b2): c=(b1-b2) print(c) elif (a1==a2) and (b2>b1): c=(b2-b1) print(c) elif (b1==b2) and (a1>a2): c=(a1-a2) print(c) elif (b1==b2) and (a2>a1): c=(a2-a1) print(c) else: print(0) a() # - 8 # ![](../Photo/81.png) # + import math def number_money2(p): for p in range(1,p+1): num = 0 for i in range (2,p): if p % i == 0: num +=1 if num == 0: number = 2**p - 1 number_money1(number,p) def number_money1(number,p): num1=0 for i in range (2,number): if number % i == 0: num1 +=1 if num1 == 0: print(p,number) def Start(): number_money2(31) Start() # - # - 9 # ![](../Photo/82.png) # ![](../Photo/83.png) import time def a(): localtime = time.asctime(time.localtime(time.time())) print("本地时间为:", localtime) a() # - 10 # ![](../Photo/84.png) def Roll_dice(): import random dice1=random.randint(1,6) dice2=random.randint(1,6) print('Dice1:%d Dice2:%d'%(dice1,dice2)) sum = dice1+dice2 return sum def Judge(): sum=Roll_dice() print('点数:%d'%sum) a=(2,3,12) b=(7,11) c=(4,5,6,8,9,10) if sum in a: print('你输了') if sum in b: print('你赢了') if sum in c: Judge1(sum) def Judge1(sum): sum_1=sum while True: sum=Roll_dice() print('点数:%d'%sum) if sum==sum_1: print('你赢了') break if sum==7: print('你输了') break def start(): Judge() start() # - 11 # ### 去网上寻找如何用Python代码发送邮件 # + # 简单邮件传输协议 import smtplib import email from email.mime.text import MIMEText from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart # 设置邮箱的域名 HOST = 'smtp.qq.com' # 设置邮件标题 SUBJECT = 'csdn博客代码' # 设置发件人邮箱 FROM = '' # 设置收件人邮箱 TO = ',' message = MIMEMultipart('related') #--------------------------------------发送文本----------------- # 发送邮件主体到对方的邮箱中 message_html = MIMEText('

    The CSDN blog is really great

    ','html','utf-8') message.attach(message_html) #-------------------------------------发送图片-------------------- # rb 读取二进制文件 # 要确定当前目录有1.jpg这个文件 image_data = open('1.jpg','rb') # 设置读取获取的二进制数据 message_image = MIMEImage(image_data.read()) # 关闭刚才打开的文件 image_data.close() message_image.add_header('Content-ID','big') # 添加图片文件到邮件信息当中去 # message.attach(message_image) #-------------------------------------添加文件--------------------- # 要确定当前目录有table.xls这个文件 message_xlsx = MIMEText(open('table.xls','rb').read(),'base64','utf-8') # 设置文件在附件当中的名字 message_xlsx['Content-Disposition'] = 'attachment;filename="test1111.xlsx"' message.attach(message_xlsx) # 设置邮件发件人 message['From'] = FROM # 设置邮件收件人 message['To'] = TO # 设置邮件标题 message['Subject'] = SUBJECT # 获取简单邮件传输协议的证书 email_client = smtplib.SMTP_SSL() # 设置发件人邮箱的域名和端口,端口为465 email_client.connect(HOST,'465') # ---------------------------邮箱授权码------------------------------ result = email_client.login(FROM,'邮箱授权码') print('登录结果',result) email_client.sendmail(from_addr=FROM,to_addrs=TO.split(','),msg=message.as_string()) # 关闭邮件发送客户端 email_client.close() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Approche incrémentale et parcimonieuse # # [x] Analyse des biais dans les données [www](https://www.kaggle.com/chasset/introduction) # # [ ] Chargement des images # # [ ] Affichage d’image pour une famille ou une espèce donnée # # [ ] Réseau de neurones convolutifs (CNN) de détection d’une famille : Whale/Dolphin # # [ ] CNN de détection d’une espèce : 30 espèces # # [ ] CNN de détection d’un individu : environ 16000 # # [ ] Augmentation des données par nuance de gris, pivotement, translation et miroir # # [ ] CNN + Transformer # # # Questions en suspend # # - Forme # - Est-ce que l’on s’intéresse uniquement à l’aileron, à ses détails de texture, à la forme du dos ? # - Couleur # - Pour passer du groupe à l'individu, un travail sur la couleur et la texture serait utile non ? # - Il y a des photos prises dans des conditions d’éclairage très divers également (obscurité, contre-jour), ce qui influence la couleur. Certains auteurs proposent de travailler en nuance de gris. # - Taille # - Et la taille (surface) de l'aileron en plus de sa morphologie ? Mais à quel point est-ce faisable d'estimer une taille à partir d'une image ??? # - Aucune information sur la distance de prise de vue ou une échelle permettant d’estimer la taille, l’âge de l’individu. # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Model 6 # Original model, but changes the standarization for normalization using MinMaxScaler, and number of epochs to 80. # Imports import pandas as pd import numpy as np from pathlib import Path import tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler,OneHotEncoder from datetime import date # --- # # ## Prepare the data to be used on a neural network model # ### Step 1: Read the `applicants_data.csv` file into a Pandas DataFrame. 
Review the DataFrame, looking for categorical variables that will need to be encoded, as well as columns that could eventually define your features and target variables. # # + # Read the applicants_data.csv file from the Resources folder into a Pandas DataFrame applicant_data_df = pd.read_csv( Path('../Resources/applicants_data.csv',) ) # Review the DataFrame applicant_data_df.head() # - # Review the data types associated with the columns applicant_data_df.dtypes # ### Step 2: Drop the “EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model. # + # Drop the 'EIN' and 'NAME' columns from the DataFrame applicant_data_df = applicant_data_df.drop(columns=['EIN','NAME']) # Review the DataFrame applicant_data_df.head() # - # ### Step 3: Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame. # + # Create a list of categorical variables categorical_variables = list(applicant_data_df.dtypes[applicant_data_df.dtypes=='object'].index) # Display the categorical variables list categorical_variables # - # Create a OneHotEncoder instance enc = OneHotEncoder(sparse=False) # Encode the categorcal variables using OneHotEncoder encoded_data = enc.fit_transform(applicant_data_df[categorical_variables]) # + # Create a DataFrame with the encoded variables encoded_df = pd.DataFrame( encoded_data, columns=enc.get_feature_names(categorical_variables) ) # Review the DataFrame encoded_df.head() # - # ### Step 4: Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables. # # > **Note** To complete this step, you will employ the Pandas `concat()` function that was introduced earlier in this course. non_categorical_variables = list(applicant_data_df.dtypes[applicant_data_df.dtypes!='object'].index) non_categorical_variables # + # Add the numerical variables from the original DataFrame to the one-hot encoding DataFrame encoded_df =pd.concat([applicant_data_df[non_categorical_variables], encoded_df], axis=1) # Review the Dataframe encoded_df.head() # - # ### Step 5: Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. The remaining columns should define the features dataset. # # # + # Define the target set y using the IS_SUCCESSFUL column y = encoded_df['IS_SUCCESSFUL'] # Display a sample of y y.head() # + # Define features set X by selecting all columns but IS_SUCCESSFUL X = encoded_df.drop(columns=['IS_SUCCESSFUL']) # Review the features DataFrame X.head() # - # ### Step 6: Split the features and target sets into training and testing datasets. # # Split the preprocessed data into a training and testing dataset # Assign the function a random_state equal to 1 X_train, X_test, y_train, y_test = train_test_split(X,y) # ### Step 7: Use scikit-learn's `StandardScaler` to scale the features data. 
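# As stated in the Model 6 summary at the top of this notebook, standardization is
# replaced here by min-max normalization. A small illustrative comparison of the two
# scalers on a toy column (the toy values are made up and are not part of the pipeline):

# +
from sklearn.preprocessing import StandardScaler

toy = np.array([[1.0], [2.0], [3.0], [10.0]])
print(StandardScaler().fit_transform(toy).ravel())  # centred to mean 0, unit variance
print(MinMaxScaler().fit_transform(toy).ravel())    # rescaled into the [0, 1] range
# -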
# + # Create a StandardScaler instance scaler = MinMaxScaler() # Fit the scaler to the features training dataset X_scaler = scaler.fit(X_train) # Fit the scaler to the features training dataset X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) # - # --- # # ## Compile and Evaluate a Binary Classification Model Using a Neural Network # ### Step 1: Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras. # # > **Hint** You can start with a two-layer deep neural network model that uses the `relu` activation function for both layers. # # + # Define the the number of inputs (features) to the model # This is the numbers of columns in the X array number_input_features = X.shape[1] # Review the number of features number_input_features # - # Define the number of neurons in the output layer number_output_neurons = 1 # + # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1 = np.ceil(np.sqrt(number_input_features * number_output_neurons)) # Review the number hidden nodes in the first layer hidden_nodes_layer1 # + # Define the number of hidden nodes for the second hidden layer hidden_nodes_layer2 = np.ceil(np.sqrt(hidden_nodes_layer1 * number_output_neurons)) # Review the number hidden nodes in the second layer hidden_nodes_layer2 # - # Create the Sequential model instance nn = Sequential() # Add the first hidden layer nn.add( Dense( units=hidden_nodes_layer1, activation='relu', input_dim=number_input_features ) ) # Add the second hidden layer nn.add( Dense( units=hidden_nodes_layer2, activation='relu' ) ) # Add the output layer to the model specifying the number of output neurons and activation function nn.add( Dense( units=1, activation='sigmoid' ) ) # Display the Sequential model summary nn.summary() # ### Step 2: Compile and fit the model using the `binary_crossentropy` loss function, the `adam` optimizer, and the `accuracy` evaluation metric. # # Compile the Sequential model nn.compile( loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Fit the model using 50 epochs and the training data # nn_model will take the history epochs=80 nn_model=nn.fit(X_train_scaled, y_train, epochs=epochs) # ### Step 3: Evaluate the model using the test data to determine the model’s loss and accuracy. # # + # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn.evaluate( X_test_scaled, y_test, verbose=2 ) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") # - # ### Step 4: Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`. # # + # Set the model's file path file_path = Path('../Resources/AlphabetSoup_Model_6.h5') # Export your model to a HDF5 file nn.save(file_path) # - # --- # # ## Optimize the neural network models # In separate files there are 3 alternative neural networks models for the classification # # ### Step 2: After finishing your models, display the accuracy scores achieved by each model, and compare the results. 
# + # Display the model loss and accuracy results results_df=pd.DataFrame( data={ 'Date': [date.today()], 'Model' : [6], 'Observations': ['Original model, MinMaxScaler'], 'Loss' : [model_loss], 'Accuracy': [model_accuracy], 'Hidden layers': 2, 'Input Features':[number_input_features], 'Output Neurons': [number_output_neurons], 'Neurons in Hidden Layers1' : [hidden_nodes_layer1], 'Neurons in Hidden Layers2' : [hidden_nodes_layer2], 'Epochs' : [epochs], 'Activation Hidden layer 1': ['relu'], 'Activation Hidden layer 2': ['relu'], 'Activation Output layer 1': ['sigmoid'], 'loss':['binary_crossentropy'], 'optimizer':['adam'], 'metrics':['accuracy'] }) print(results_df.T) # - results_df.to_csv('../Resources/results.csv', mode='a') # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R 4.1.2 # language: R # name: ir41 # --- # # Water fluoridation and Dental Caries # # **Date:** 2021-12-01 # # **Reference:** M249, Book 1, Part 2 suppressPackageStartupMessages(library(tidyverse)) library(R249) library(DescTools) # ## Summary # ## Get the data (dat <- as_tibble(read.csv(file = "..\\..\\data\\dentalcaries.csv"))) # ## Prepare the data # # Cast the `exposure`, `outcome`, `level` columns to factors. labexp <- c("not fluoridated", "fluoridated") labout <- c("caries", "no caries") lablev <- c("age 8 years", "age 9 years", "age 10 years", "age 11-12 years") (sorteddat <- dat %>% mutate(exposure = factor(dat$exposure, labexp)) %>% mutate(outcome = factor(dat$outcome, labout)) %>% mutate(level = factor(dat$level, lablev)) %>% arrange(level, exposure, outcome)) # Filter the tibble on each specific `level`, pull the `count` column as a vector and initilise a matrix. # Append this new matrix to an array. age8 <- filter(sorteddat, level == "age 8 years") %>% pull(count) %>% matrix(nrow = 2, ncol = 2, byrow = TRUE, dimnames = list(labexp, labout)) age9 <- filter(sorteddat, level == "age 9 years") %>% pull(count) %>% matrix(nrow = 2, ncol = 2, byrow = TRUE, dimnames = list(labexp, labout)) age10 <- filter(sorteddat, level == "age 10 years") %>% pull(count) %>% matrix(nrow = 2, ncol = 2, byrow = TRUE, dimnames = list(labexp, labout)) age11_12 <- filter(sorteddat, level == "age 11-12 years") %>% pull(count) %>% matrix(nrow = 2, ncol = 2, byrow = TRUE, dimnames = list(labexp, labout)) datarr <- array( c(age8, age9, age10, age11_12), dim = c(2, 2, 4), dimnames = list(labexp, labout, lablev) ) print(datarr) # ## Stratum-specific odds ratios # # Calculate the stratum-specific odds ratio. # # Note, we do not calculate the stratum-specific odds ratio for level age 9 years, given the presence of the 0 count. 
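# As a reminder of why a zero count is a problem: for a 2x2 table with cells $a$, $b$
# in one row and $c$, $d$ in the other, the sample odds ratio is
# $\widehat{OR} = \dfrac{a\,d}{b\,c}$, so a zero cell drives the estimate to 0 or leaves
# it undefined, and the standard error of the log odds ratio,
# $\sqrt{1/a + 1/b + 1/c + 1/d}$, cannot be computed either.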
# age 8 years oddsratio(datarr[, , 1]) # age 10 years oddsratio(datarr[, , 3]) # age 11-12 years oddsratio(datarr[, , 4]) # ## Crude odds ratio oddsratio_crude(datarr) # ## Tarone's test for homogeneity BreslowDayTest(datarr, correct = TRUE) # ## Mantel-Haenszel odds ratio and chi-squared test mantelhaen.test(datarr) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import math # + sixteen_df = pd.read_csv('./uscho_mens_games_2016.csv',header=None) sixteen_df['season'] = pd.Series(2016, index=sixteen_df.index) seventeen_df = pd.read_csv('./uscho_mens_games_2017.csv',header=None) seventeen_df['season'] = pd.Series(2017, index=seventeen_df.index) eighteen_df = pd.read_csv('./uscho_mens_games_2018.csv',header=None) eighteen_df['season'] = pd.Series(2018, index=eighteen_df.index) #drop games not yet played eighteen_df = eighteen_df.dropna(subset=[9]) # put all the seasons together df = pd.concat([sixteen_df,seventeen_df,eighteen_df],ignore_index=True) # - df.head() df = df[[8,9,14,15,'season']] df.head() df = df.rename(columns={8: 'team', 9: 'teamscore', 14: 'opponent', 15: 'oppscore'}) df.head() df['weights'] = (df['season']-2012) df['weights'] = df['weights'].apply(lambda x: math.factorial(x)) df.head() df['location'] = 'V' df.head() import re df.head() # + df['team'] = df['team'].apply(lambda x: re.sub("[\(\[].*?[\)\]]", "", x)) df['team'] = df['team'].apply(lambda x: x.lstrip()) df['opponent'] = df['opponent'].apply(lambda x: re.sub("[\(\[].*?[\)\]]", "", x)) df['opponent'] = df['opponent'].apply(lambda x: x.lstrip()) # - df.head() df['scorediff'] = (df['teamscore']-df['oppscore']) df['location'] = df['location'].replace('V',-1) df['location'] = df['location'].replace('N',0) df['location'] = df['location'].replace('H',1) df.head() df1 = df.copy() df1.head() df1 = df1.rename(columns={'team': 'opponent', 'teamscore': 'oppscore', 'opponent': 'team', 'oppscore':'teamscore'}) df1.head() df1['scorediff'] = df1['scorediff'].apply(lambda x: x*-1) df1['location'] = df1['location'].apply(lambda x: x*-1) df1.head() df.shape df1.shape df = pd.concat([df,df1],ignore_index=True) df.head() df = df[~df.team.str.contains("/")] df = df[~df.opponent.str.contains("/")] # + # create dummy variables, need to do this in python b/c does not handle automatically like R team_dummies = pd.get_dummies(df.team, prefix='team') opponent_dummies = pd.get_dummies(df.opponent, prefix='opponent') df = pd.concat([df, team_dummies, opponent_dummies], axis=1) # - df.head() df = df.dropna(subset=['scorediff','weights']) # make the training data X = df.drop(['team','opponent','teamscore','oppscore','season','weights','scorediff'], axis=1) y = df['scorediff'] weights = df['weights'] X.head() from sklearn.linear_model import Ridge ridge_reg = Ridge() ridge_reg.fit(X, y, sample_weight=weights) # get the R^2 value r_squared = ridge_reg.score(X, y, sample_weight=weights) print('R^2 on the training data:') print(r_squared) # get the coefficients for each feature coef_data = list(zip(X.columns,ridge_reg.coef_)) coef_df = pd.DataFrame(coef_data,columns=['feature','feature_coef']) coef_df.head() # + # first get rid of opponent_ variables team_df = coef_df[~coef_df['feature'].str.contains("opponent")] # get rid of the location variable team_df = team_df.iloc[1:] # - # rank them by 
coef, not alphabetical order ranked_team_df = team_df.sort_values(['feature_coef'],ascending=False) # reset the indices at 0 ranked_team_df = ranked_team_df.reset_index(drop=True); ranked_team_df.rename(columns={'feature':'team', 'feature_coef':'YUSAG_coef'}, inplace=True) ranked_team_df['team'] = ranked_team_df['team'].str.replace('team_', '') ranked_team_df ivy_team_names = ['Yale','Harvard','Princeton','Cornell','Brown','Columbia','Dartmouth','Penn'] ivy_ranked_team_df = ranked_team_df[ranked_team_df['team'].isin(ivy_team_names)] ivy_ranked_team_df ECAC_team_names = ['Yale','Harvard','Princeton','Cornell','Brown','Columbia','Dartmouth','Penn', 'Clarkson','Colgate','Union','','Quinnipiac','Rensselaer'] ECAC_ranked_team_df = ranked_team_df[ranked_team_df['team'].isin(ECAC_team_names)] ECAC_ranked_team_df ECAC_ranked_team_df.to_csv("ECAC_ranked_team_df") # I am now going to train a logistic regression model, so that we can do win probabilities. # # I am going to use multinomial linear regression to get probabilities for the three classes (0=loss, 1=tie, 2=win) # Using the model to predict win probabilities # First, let's figure out how the predicted score differentials translate into percentages. df['predicted_scorediff'] = ridge_reg.predict(X) df.head() df['predicted_scorediff'] = df['predicted_scorediff'].apply(lambda x: round(x,2)) df.head() # I'm only going to train the point spread to percentages model on the most recent data b/c that's what the YUSAG prediction model is optimized for. I.e. the predictions are not great when you go back a couple of years since we weights more recent results more heavily. # ignore warnings from pandas (sorry my code probably isn't idiomatic) import warnings warnings.filterwarnings('ignore') last_year_df = df[df['season']==2018] last_year_df.loc[last_year_df.scorediff > 0, 'result'] = 2 # win last_year_df.loc[last_year_df.scorediff == 0, 'result'] = 1 # tie last_year_df.loc[last_year_df.scorediff < 0, 'result'] = 0 # tie last_year_df[['scorediff','predicted_scorediff','result']].head(20) # Now use a logistic regression model to predict the result based on the predicted_scorediff from sklearn.linear_model import LogisticRegression # make the training data X = last_year_df['predicted_scorediff'] X = X.values.reshape(-1,1) y = last_year_df['result'] log_reg = LogisticRegression() # we create an instance of Neighbours Classifier and fit the data. log_reg.fit(X, y) log_reg.score(X, y) log_reg.predict_proba(X) # + last_year_df['loss_prob'] = 0 last_year_df['loss_prob'] = log_reg.predict_proba(X)[:,0] last_year_df['tie_prob'] = 0 last_year_df['tie_prob'] = log_reg.predict_proba(X)[:,1] last_year_df['win_prob'] = 0 last_year_df['win_prob'] = log_reg.predict_proba(X)[:,2] # - last_year_df[['scorediff','predicted_scorediff','loss_prob','tie_prob','win_prob']] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CH.9_Plotting_and_Visualization # ## 1. Brief Matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns data = np.arange(10) data plt.plot(data) plt.show() # ## 2. Subplot fig, axes = plt.subplots(2,2, sharex = True, sharey = True) for i in range(2) : for j in range(2) : axes[i, j].hist(np.random.randn(500), bins = 50, color = 'k', alpha = 0.5) plt.subplots_adjust(wspace = 0, hspace = 0) # ## 3. 
Color, Marker, Style from numpy.random import randn plt.plot(randn(30).cumsum(), 'ko--') plt.show() # + data = np.random.randn(30).cumsum() plt.plot(data, 'k--', label = 'Default') plt.plot(data, 'k--', drawstyle = 'steps-post', label = 'steps-post') plt.legend(loc = 'best') plt.show() # - # ## 4. Label, Legend fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(np.random.randn(1000).cumsum()) # + fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(randn(1000).cumsum(), 'k', label = 'one') ax.plot(randn(1000).cumsum(), 'k--', label = 'two') ax.plot(randn(1000).cumsum(), 'k', label = 'three') plt.legend(loc = 'best') plt.show() # - # ## 5. Figure # + fig = plt.figure(figsize = (12, 6)) ax = fig.add_subplot(1,1,1) rect = plt.Rectangle((0.2, 0.75), 0.4, 0.15, color = 'k', alpha = 0.3) circ = plt.Circle((0.7, 0.2), 0.15, color = 'b', alpha = 0.3) pgon = plt.Polygon([[0.15, 0.15], [0.35, 0.4], [0.2, 0.6]], color = 'g', alpha = 0.5) ax.add_patch(rect) ax.add_patch(circ) ax.add_patch(pgon) # - # - save # plt.savefig('figpath.svg') # plt.savefig('figpath.png', dpi = 400, bbox_inches = 'tight') # from io import BytesIO buffer = BytesIO() # plt.savefig(buffer) # plot_data = buffer.getvalue() # ## 6. Line, Bar, Histogram s = pd.Series(np.random.randn(10).cumsum(), index = np.arange(0,100,10)) s.plot() fig, axes = plt.subplots(2,1) data = pd.Series(np.random.rand(16), index = list('abcdefghijklmnop')) data.plot.bar(ax = axes[0], color = 'k', alpha = 0.7) data.plot.barh(ax = axes[1], color = 'k', alpha = 0.7) comp1 = np.random.normal(0,1, size = 200) comp2 = np.random.normal(10,2, size = 100) values = pd.Series(np.concatenate([comp1, comp2])) sns.distplot(values, bins = 100, color = 'k') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Super-Simple Neural Network Demo import math import tensorflow as tf import numpy as np from matplotlib import pyplot as plt import matplotlib from mpl_toolkits.mplot3d import Axes3D # %matplotlib inline # ### Create some 2-dimensional data # + def ground_truth(x,y): return (1.3*x-.5)*(1.3*x-.5) + (y-.5)*(y-.5) < .05 def createSamples(N, xl, xr, yu, yo, ground_truth, rnd=True): """ ground_truth is a function that calculates the "true" label, given coordinates x and y Produce N samples in the rectangle [xl, xr, yu, yo] with the given ground_truth """ if rnd: np.random.seed(1234) x = np.random.uniform(xl,xr,N) y = np.random.uniform(yu,yo,N) else: N = int(math.sqrt(N)) dx = (xr - xl) / N dy = (yo - yu) / N field = np.array([(xl + dx * xs,yu + dy * ys) for xs in range(N) for ys in range(N)]).T x, y = field[0], field[1] c = ground_truth(x, y) * 1.0 return x, y, c # - # Before rerunning, close the previous session. Ignore error the very first time try: sess.close() except NameError: print("Don't worry. Need to ignore this error once") sess = tf.InteractiveSession() FLAGS=lambda: None # ### The 2-dimensional input data, classes are represented by colors sx, sy, sc = createSamples(10000, 0, 1, 0, 1, ground_truth, rnd=False) points=np.array([sx, sy]) tr_samples = points.T # Need transposed for use with Tensorflow matplotlib.rcParams['figure.figsize'] = (8,8) plt.scatter(sx, sy, c=sc, cmap="bwr", marker='.', s=1) # ### The Neural Network # We'll be creating a simple feed-forward network with two hidden layers. 
# # ![NN](images/NN_2x3x3x2_small.png) # # Our neural network will be defined as # # $ f(x) = \Theta^{(3)} \cdot \sigma(\Theta^{(2)} \cdot \sigma(\Theta^{(1)} \cdot x + b^{(1)} ) + b^{(2)}) + b^{(3)}$ # # Note, that we omit the final non-linearity at this point. That's for mere technical reasons and doesn't change the story. # # Below you see the neural network in code, featuring some illustrative initial values. # # You see: We have 2 input nodes, 3 nodes in each of the hidden layers and again 2 nodes in the output layer # ### Weight matrices and biases initialized to some values Theta1 = np.array([[1.6, 4], [1.6, -1.2], [-3.6, 1.6]]) # 3x2 weight Matrix towards the first hidden layer b1 = np.array([[-1, 1, 6]]).T # bias of the first hidden layer Theta2 = np.array([[1, 2, -3], [.5, .2, -3], [2, 1, -.2]]) # 3x3 weight Matrix towards the second hidden layer b2 = np.array([[.2, .1, -.4]]).T # bias of the 2nd hidden layer Theta3 = np.array([[.5, 2, -.03], [.2, 1, -.2]]) # 2x3 weight Matrix towards the output layer b3 = np.array([[.2, .3]]).T # bias of the output layer # + # # NOTE: You need to initialize with the transpose of the weight matrix, otherwise TF mixes up columns and rows # Of course that's not a problem, since typically, all numbers are randomly initialized, anyway. # (can you imagine the frustration until I found out?) # W1_i=tf.constant_initializer(Theta1.T) b1_i=tf.constant_initializer(b1) W2_i=tf.constant_initializer(Theta2.T) b2_i=tf.constant_initializer(b2) W3_i=tf.constant_initializer(Theta3.T) b3_i=tf.constant_initializer(b3) def feed_forward(x): _dense1=tf.layers.Dense(3, activation=tf.nn.sigmoid, kernel_initializer=W1_i, bias_initializer=b1_i) _dense2=tf.layers.Dense(3, activation=tf.nn.sigmoid, kernel_initializer=W2_i, bias_initializer=b2_i) _logits=tf.layers.Dense(2, kernel_initializer=W3_i, bias_initializer=b3_i) dense1 = _dense1(x) dense2 = _dense2(dense1) y = _logits(dense2) return dense1, dense2, y # - # ### Placeholder for the different data sets X = tf.placeholder(dtype=tf.float64, shape=[None, 2]) L = tf.placeholder(dtype=tf.int64, shape=[None]) # ### Construct the computational graph for the optimization classes = tf.one_hot(L, depth=2) hidden1, hidden2, output = feed_forward(X) probs = tf.nn.softmax(output) objective = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=classes)) optimizer = tf.train.AdamOptimizer(learning_rate=1e-2) train = optimizer.minimize(objective) preds = tf.argmax(probs,axis=1) accuracy = tf.reduce_mean(tf.cast(tf.equal(preds, L), dtype=tf.float64)) # ### Setting up the training # + init = tf.global_variables_initializer() sess.run(init) losses = [] accies = [] n_batch = 50 def single_batch(n_batch): for _ in range(n_batch): _, _all_output, _objective, _accuracy = sess.run([train, output, objective, accuracy], feed_dict={X: tr_samples, L: sc.astype(int)}) print ("Loss: %s - Accuracy: %s" % (_objective, _accuracy)) losses.append(_objective) accies.append(_accuracy) return _all_output # - # ### First, let's look at the hidden layers before the training y, h1, h2 = sess.run([preds, hidden1, hidden2], feed_dict={X: tr_samples, L: sc.astype(int)}) h1 = h1.T h2 = h2.T # ### The first hidden layer before the training matplotlib.rcParams['figure.figsize'] = (12,12) plt.axes(projection='3d', elev=30, azim=110) plt.scatter(*h1, c=sc, cmap='bwr', marker='.') # ### The Second Hidden Layer plt.axes(projection='3d', elev=40, azim=240) plt.scatter(*h2, c=sc, cmap='bwr', marker='.') # ### Do 50 Training Runs for _ in range 
(25): all_output = single_batch(n_batch) # ### Learning to Linearly Separate # The network learned to tweak the manifold of the last hidden layer such that a hyperplane can separate red from blue points y, h1, h2 = sess.run([preds, hidden1, hidden2], feed_dict={X: tr_samples, L: sc.astype(int)}) h1 = h1.T h2 = h2.T # ### The First Hidden Layer After Training plt.axes(projection='3d', elev=20, azim=70) plt.scatter(*h1, c=sc, cmap='bwr', marker='.') # ### The Second Hidden Layer After the Training plt.axes(projection='3d', elev=40, azim=240) plt.scatter(*h2, c=sc, cmap='bwr', marker='.') # ### After some period of stagnation, the network learned to predict the colors from the coordinates plt.plot(accies) plt.plot(losses) # ### Inferring the classes (colors) of some given test coordinates # + N=20 sx, sy, sc = createSamples(N, 0, 1, 0, 1, ground_truth) points=np.array([sx, sy]) plt.scatter(sx, sy, c=sc, cmap="bwr", marker='.') test_samples = np.array([sx, sy]).T test_labels = sc.astype(int) # - # ### The network correctly infers (most of) the classes of the given test coordinates # + test_infered = sess.run(preds, feed_dict={X: test_samples, L: test_labels}) print("true classes : %s" % test_labels) print('infered classes: %s' % test_infered) # - # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.3 # language: julia # name: julia-1.5 # --- include("../load.jl") include("../tools/plots.jl") gm = gear(true) uniform_sample_and_eval!(gm) bbrs = [bbl for bbl in gm.bbls if bbl isa BlackBoxRegressor] bbr = bbrs[1] bbr.constraint surveysolve(gm); gm.solution_history println("Approximate objective: $(bbr.optima)") println("Actual objective: $(bbr.actuals)") # How to narrow the gap? learn_constraint!(bbr, "upper" => minimum(bbr.actuals)) update_tree_constraints!(gm, bbr) upper_bound_sample(bbr) learn_constraint!(bbr, "reg" => minimum(bbr.actuals), regression_sparsity = 0, max_depth = 5) update_tree_constraints!(gm, bbr) optimize!(gm) df = copy(gm.solution_history) insertcols!(df, :actual => bbr.actuals) # Showing upper bounding classifier bbr.learners[end-1] # Showing regressor bbr.learners[end-2] m = gear(false); optimize!(m); println("Solution: $(solution(m)) with objective $(getobjectivevalue(m))") print(gm.model) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="lYeNN6UJDMDz" colab_type="text" # # **Population prediction via CNNs and geospatial imagery** # # ### Spring 2020 | AM216 Final Project # ### , , # GitHub repository [here](https://github.com/chickert/geospatial_analysis) # # + [markdown] id="mgtEVoQAc5BL" colab_type="text" # #Abstract: # # Google Earth Engine has a multi-petabyte catalog of open source satellite imagery and geospatial datasets available to scientists and researchers to analyze. One interesting problem is extracting demographic information from these satellite images, such as the population of a given area. Our goal is to build neural network models to estimate the population from satellite images. This is a difficult task, as the model must learn land use types within an image in order to identify high- versus low-population areas. Previous sources have worked on a similar problem using deep sequential models and UNet architectures. 
We expand this by building many models including FFNNs, CNNs, UNets, and also investigating the value of transfer learning for this task. # + [markdown] id="lNyso3y1EMAF" colab_type="text" # # Introduction # # Our group is interested in the combination of machine learning methods and satellite imagery. The amount of data in this space (pun intended) is already remarkable, and growing rapidly. In addition to the large datasets released from government-affiliated science organizations -- such as NASA/USGS' Landsat image collection or the EU Copernicus program's Sentinel data hub -- the proliferation of privately-held "constellations" is now contributing to the growing data pool. Simultaneously, improvements in image analysis driven by machine learning methods make gleaning insights from this relatively-unstructured data more feasible than ever. # # For our project, our group assesses the performance of neural networks in predicting a US census tract's population with only raw RGB pixel data collected from the Copernicus satellite program's geospatial imagery. For the context of this project, we will limit our anlaysis to Massachusetts, though we expect that our results are relevant beyond the state's borders. On an even broader level, we hope that the general process and data pipeline used to implement our project could be adapted relatively easily to a much wider variety of predicton tasks using geospatial data from Google's Earth Engine. # # + [markdown] id="5ci63qNtFHi8" colab_type="text" # # Background and Data Sources # # ## 2010 Census Data ## # Every 10 years, the United States conducts a census and releases this information to the public. The highest level of granularity in this information is a "census block", which is roughly the size of a city block. Census blocks are, in turn, part of census tracts, which vary significantly in size depending on population density, but in urban areas, are roughly the size of a neighborhood and contain somewhere between [1,200 and 8,000 inhabitants](https://www2.census.gov/geo/pdfs/education/CensusTracts.pdf). # + [markdown] id="h1wKL6lTIUYf" colab_type="text" # ## Cenpy ## # # The 2010 Census is massive, and as such, we used CenPy (https://cenpy-devs.github.io/) to easily query this dataset for the information we wanted. CenPy is built with Geopandas (https://geopandas.org/), which allows for easy data manipulation and discovery. # # Below, we install CenPy and pull back information about every tract in the state of Massachusetts. # # Specifically, note the variable paramater, which controls which data we get back from the census, for example `variables=['H006002', 'P001001']`. Every variable is defined [here](https://api.census.gov/data/2010/dec/sf1/variables.html). In this example the code is pulling back # `H006002` which corresponds to "Average household size!!Renter occupied" # and `P001001` which corresponds to "Total Population". In the code block below we pull back the population data using this second variable. # # It should again be noted that aggregating this information using the raw census data would be slow and somewhat computationally expensive; thus, using CenPy is preferred. 
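# As a sketch of the two-variable query described above, the same call could pull the
# renter household size and the total population together. This is illustrative only:
# it assumes cenpy has been installed as in the next cell and issues the same kind of
# request shown there, just with the extra H006002 column.

import cenpy
ma_two_vars = cenpy.products.Decennial2010().from_state(
    'Massachusetts', variables=['H006002', 'P001001'], level="tract")
ma_two_vars.head()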
# # + id="zZRKoxcdDEf2" colab_type="code" outputId="78f0520c-9f75-4a52-f17e-118c0f7ddce7" colab={"base_uri": "https://localhost:8080/", "height": 928} # !apt install libspatialindex-dev # !pip install cenpy import cenpy massachusetts = cenpy.products.Decennial2010().from_state('Massachusetts', variables=['P001001'], level="tract") massachusetts.head() # + [markdown] id="-CiHaoXTELUJ" colab_type="text" # Now we filter this to only look at one of our group member's Census tract in Cambridge, MA, which is in `state = 25`, `county=017`, `tract = 353300` # # --- # # # # + id="YrkDR_gpMUlD" colab_type="code" outputId="103cea0e-9249-4b83-91de-e8c9269b64e2" colab={"base_uri": "https://localhost:8080/", "height": 94} massachusetts[(massachusetts['state'] == '25') & (massachusetts['county'] == '017') & (massachusetts['tract'] == '353300')] # + [markdown] id="jFbKFhtkDDmn" colab_type="text" # Thus we see that this census tract has a total population of 3,471. # + [markdown] id="6vzuTiWUNVj0" colab_type="text" # ## Google Earth Engine # Google Earth Engine allows for easy access to a vast array of satellite imagery, and -- importantly for our project -- also contains geometries of Census Tracts and Census Blocks. Below we authenticate Earth Engine to run within this Colab notebook. # # + id="Lz9Sa5_cNRaY" colab_type="code" colab={} import ee # + id="ZjdpPkryONSk" colab_type="code" outputId="e47413d9-9282-487a-be10-dc1e429f6411" colab={"base_uri": "https://localhost:8080/", "height": 178} # Trigger the authentication flow. ee.Authenticate() # Initialize the library. ee.Initialize() # + [markdown] id="2-RJcFstPCko" colab_type="text" # We will also allow this to read and write from Google Drive, and then import the `folium` library to visualize maps. # + id="yMZ_XekmOP_W" colab_type="code" colab={} from google.colab import auth auth.authenticate_user() # + id="4uFQQV4iOqIu" colab_type="code" outputId="9257938f-7e30-4834-f8c4-3e0a52c881b6" colab={"base_uri": "https://localhost:8080/", "height": 34} import folium print(folium.__version__) # + [markdown] id="b0s-VNmaV59n" colab_type="text" # ## Satellite Images # # + [markdown] id="-pniJfM1QlIZ" colab_type="text" # ### Landsat # The USGS and NASA's Landsat satelites have been taking pictures of the world since the 1970s. The two most recent satellites, Landsat 7 and 8, are both integrated with Earth Engine, and provide a vast array of imformation such a surface reflection, burn, vegatation, and temperature information. # # Ultimately -- and as we describe below -- we elected to go with data from the EU's Copernicus program. However, it is instructive to first provide some insight into our investigation of the Landsat data (especially for readers considering alternative projects with geospatial data), so we provide that description below. See further below for the data processing we do with the Copernicus dataset, which we ultimately used for our project. # # In both cases, we rely strictly on the RGB bands from the surface reflection data, which correspond to bands 2,3, and 4. # # Given our Census data is from 2010, we initially wanted to get data from as close to 2010 as possible, which would come from Landsat 7, which has been recording images since 1999. # # + [markdown] id="j44t9B5iV-Tq" colab_type="text" # # ### Removing Clouds # One consideration when working with satellite images is the presense of clouds in many images. 
To combat this, we mask them via bitshifting with a method adaped from this [notebook](https://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/AI_platform_demo.ipynb). # # + id="tu8QG_lLP7Hi" colab_type="code" outputId="e3ff23df-21a8-41df-b284-151281bdbce1" colab={"base_uri": "https://localhost:8080/", "height": 674} # Cloud masking function. fc = ee.FeatureCollection('TIGER/2016/States').filter(ee.Filter.eq('NAME', 'Massachusetts')) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() L8SR = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR") BANDS = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'] # Cloud masking function. # Cloud masking function. def maskL8sr(image): cloudShadowBitMask = ee.Number(2).pow(3).int() cloudsBitMask = ee.Number(2).pow(5).int() qa = image.select('pixel_qa') mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And( qa.bitwiseAnd(cloudsBitMask).eq(0)) return image.updateMask(mask).select(BANDS).divide(10000) # The image input data is a 2018 cloud-masked median composite. image = L8SR.filterDate('2013-01-01', '2013-12-31').map(maskL8sr).median().clip(aoi) # Use folium to visualize the imagery. mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}) map = folium.Map(location=[42.3612549,-71.1088637]) folium.TileLayer( tiles=mapid['tile_fetcher'].url_format, attr='Map Data © Google Earth Engine', overlay=True, name='median composite', ).add_to(map) map.add_child(folium.LayerControl()) map # + [markdown] id="43emI_KzVUDZ" colab_type="text" # Note the map above, which has a map of Massachusetts with clouds removed. # # Now we will examine the specific census tract in Cambridge, MA that we pinpointed before. The geometries of every US census tract are accessible via https://developers.google.com/earth-engine/datasets/catalog/TIGER_2010_Tracts_DP1, which also includes some additional information about each tract. # + id="lPYIp3GmW-G-" colab_type="code" outputId="ad6c632a-2109-462c-a7b9-e6c7625eaf9d" colab={"base_uri": "https://localhost:8080/", "height": 674} # Cloud masking function. fc = ee.FeatureCollection('TIGER/2016/States').filter(ee.Filter.eq('NAME', 'Massachusetts')) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() L8SR = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR") BANDS = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'] # Cloud masking function. # Cloud masking function. def maskL8sr(image): cloudShadowBitMask = ee.Number(2).pow(3).int() cloudsBitMask = ee.Number(2).pow(5).int() qa = image.select('pixel_qa') mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And( qa.bitwiseAnd(cloudsBitMask).eq(0)) return image.updateMask(mask).select(BANDS).divide(10000) # The image input data is a 2018 cloud-masked median composite. image = L8SR.filterDate('2013-01-01', '2013-12-31').map(maskL8sr).median().clip(aoi) # Use folium to visualize the imagery. 
mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}) map = folium.Map(location=[42.3612549,-71.1088637]) folium.TileLayer( tiles=mapid['tile_fetcher'].url_format, attr='Map Data © Google Earth Engine', overlay=True, name='median composite', ).add_to(map) map.add_child(folium.LayerControl()) map # + id="jEwsKKnsVe_c" colab_type="code" outputId="2e536dbb-4faa-48f3-cc15-a52e9931e1c6" colab={"base_uri": "https://localhost:8080/", "height": 710} fc = ee.FeatureCollection("TIGER/2010/Tracts_DP1").filter(ee.Filter.eq('geoid10', '25017353300')) print(fc.size().getInfo()) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() print(fc.size().getInfo()) L8SR = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR") BANDS = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'] # Cloud masking function. # Cloud masking function. def maskL8sr(image): cloudShadowBitMask = ee.Number(2).pow(3).int() cloudsBitMask = ee.Number(2).pow(5).int() qa = image.select('pixel_qa') mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And( qa.bitwiseAnd(cloudsBitMask).eq(0)) return image.updateMask(mask).select(BANDS).divide(10000) # The image input data is a 2018 cloud-masked median composite. image = L8SR.filterDate('2013-01-01', '2013-12-31').map(maskL8sr).median().clip(aoi) # Use folium to visualize the imagery. mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}) map = folium.Map(location=[42.3612549,-71.1088637], zoom_start=15) folium.TileLayer( tiles=mapid['tile_fetcher'].url_format, attr='Map Data © Google Earth Engine', overlay=True, name='median composite', ).add_to(map) map.add_child(folium.LayerControl()) map # + [markdown] id="XKhSefdbYQEG" colab_type="text" # Above is the census tract. Unfortunately, this image is much blurrier than anticipated. A quick investigation reveals that Landsat has a resolution of 30 meters per pixel, which seems suboptimal for the purpose of pulling information from at the lowest level. As such, we decided to move to another satellite image source found on Earth Engine, the EU's Copernicus Sentinel-2 satellite (https://earth.esa.int/web/sentinel/user-guides/sentinel-2-msi/resolutions/radiometric), which has a resolution of 10 meters per pixel, which we believe is better for our analysis. # # It should be noted that the images that Google uses for Google Earth and Google Maps are **not** licensed for use on Earth Engine, and are provided by DigitalGlobe. These images are as good as 0.31 meters per pixel, but are not free for use and are licensed by Google. As we detail below, accessing this data would be one path worth pursuing in future projects to improve the models' performance. # + [markdown] id="bQs6SE2YaA6E" colab_type="text" # ### Copernicus # Cambridge census tract using the [Copernicus Sentinel-2 Surface Reflection data](https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2_SR) as the image source. 
# + id="TcoG9IUeXnaG" colab_type="code" outputId="349028b3-6bc0-49ec-b3be-6fdf7f38163b" colab={"base_uri": "https://localhost:8080/", "height": 710} fc = ee.FeatureCollection("TIGER/2010/Tracts_DP1").filter(ee.Filter.eq('geoid10', '25017353300')) print(fc.size().getInfo()) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() print(fc.size().getInfo()) image = ee.ImageCollection('COPERNICUS/S2_SR').filter(ee.Filter.calendarRange(4,10,'month')).filterBounds(aoi).filterMetadata('CLOUD_COVERAGE_ASSESSMENT', 'less_than', 30) image = image.median().clip(aoi) mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 2000}) map = folium.Map(location=[42.3612549,-71.1088637], zoom_start=15) folium.TileLayer( tiles=mapid['tile_fetcher'].url_format, attr='Map Data © Google Earth Engine', overlay=True, name='median composite', ).add_to(map) map.add_child(folium.LayerControl()) map # + [markdown] id="n3DXFdgcdjk7" colab_type="text" # As one can see, this image is significantly higher resolution than the Landsat 7 image. Moreover, the process of removing cloud is easier as Copernicus computes a `CLOUD_COVERAGE_ASSESSMENT`. Thus, in the code above, we filter to focus on the census block, and then take the median of all images of that geography with a `CLOUD_COVERAGE_ASSESSMENT` of less than 30. # # One limitation here is that images are not availble from the earlier part of of the 2010s. Thus, one key issue with our subsequent analyses is that that the images are not from the same year as the data. # + [markdown] id="9Z0ssUMo5mVW" colab_type="text" # # Data Pipeline # ## Saving Images # While Earth Engine does provide native functionality for exporting images to Tensorflow/Keras, we found it was actually easier to simply export each image as a PNG, thus creating a layer abstraction between our models and the data itself. This also made it easier to share the dataset amongst our group members, and the .png files made for a familiar format for use in our models. We exported the images in the following manner, labeling each tract by its `geoid`. # # # + [markdown] id="AQUl5YNY6vEs" colab_type="text" # First, pull back each tract's data # + id="L-pdqqVV6st2" colab_type="code" outputId="8ca57862-8c54-41d5-fe9c-499a03fd76ce" colab={"base_uri": "https://localhost:8080/", "height": 893} # !apt install libspatialindex-dev # !pip install cenpy import cenpy massachusetts = cenpy.products.Decennial2010().from_state('Massachusetts', variables=['P001001'], level="tract") massachusetts.head() # + [markdown] id="w2FbKk-t6_2M" colab_type="text" # Below we download a 512 x 512 version of the images. 
# + id="vR-VlD88bF4N" colab_type="code" outputId="839d3559-6c9e-4692-8779-6374fa686c37" colab={"base_uri": "https://localhost:8080/", "height": 493, "referenced_widgets": ["53a91230688a4d3988aeb9387e9a9150", "e89b319e273f46e78fa70db34de85fd5", "d486a91fcc9848cc81ecaa17d7bf3054", "c82da68db2634ccfab978502105a1a05", "b417ba7d54c14cb6b4c1da056563dcc6", "64382460351c48419fa3b7104539fe48", "e198b2fa372a4b2c8bce49b85857188f", "b6b96281e5664fe0902c0bc5418ca1b5"]} import os.path from tqdm.auto import tqdm grouped = massachusetts.groupby(['tract', 'county']) tracts = grouped[['tract', 'county']] def save_tract_image(tract): (this_tract, this_county) = tract[0] geoid = '25'+this_county+this_tract save_path = "/content/drive/My Drive/summer_retry/"+geoid+".png" if os.path.exists(save_path): pass else: fc = ee.FeatureCollection("TIGER/2010/Tracts_DP1").filter(ee.Filter.eq('geoid10', geoid)) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() image = ee.ImageCollection('COPERNICUS/S2_SR').filter(ee.Filter.calendarRange(4,10,'month')).filterBounds(aoi).filterMetadata('CLOUD_COVERAGE_ASSESSMENT', 'less_than', 30) image = image.median().clip(aoi) parameters = {'min': 0, 'max': 2000, 'dimensions': 512, 'bands': ['B4', 'B3', 'B2'], 'region':aoi} url = image.getThumbUrl(parameters) # urllib.request.urlretrieve(url, "/content/drive/My Drive/summer_retry/"+geoid+".png") for tract in tqdm(tracts): save_tract_image(tract) # + [markdown] id="YbFsy05K_vFX" colab_type="text" # One potential issue with this approach is that we create a 512x512 image of every census tract, despite the fact that tracts' land areas can differ significantly. One thing we can do to potentially combat this is to also record the area of each tract, and later feed that as another parameter into the models. We try this later in the experiments section. Thus, below we will create a dictionary of each tract and and its area, as calculated by the the area of the geometry in Earth Engine (which has a convenient built-in method to do so). We divide by `1000 * 1000` to convert the area from square meters to square kilometers. # + id="RQkL1OHi7FDA" colab_type="code" outputId="6d799fb9-cd92-42ce-b724-5152216f9179" colab={"base_uri": "https://localhost:8080/", "height": 493, "referenced_widgets": ["f4b548eb2517483d9e42553b926014e5", "adcac12c6ecf4e78bb65ff2c0139b744", "b4429e47494c4ea99017f7d81c0c3320", "dadb47a0fe844961b38a07f38b047bfc", "1bdf105e4196445d9e68164039269920", "a95399e471214664b3eec7a83d67852f", "", ""]} import os.path grouped = massachusetts.groupby(['tract', 'county']) tracts = grouped[['tract', 'county']] tract_areas = {} def extract_tract_area(tract): (this_tract, this_county) = tract[0] geoid = '25'+this_county+this_tract fc = ee.FeatureCollection("TIGER/2010/Tracts_DP1").filter(ee.Filter.eq('geoid10', geoid)) test = fc.getInfo()['features'] feature = ee.Feature(test[0]) aoi = feature.geometry() tract_areas[geoid] = aoi.area().divide(1000 * 1000).getInfo(); for tract in tqdm(tracts): extract_tract_area(tract) # + [markdown] id="O4HBOcEDAPlX" colab_type="text" # To prevent having to run this again, let me pickle it. 
# # + id="PO4FzkGVAMge" colab_type="code" colab={} # import pickle # pickle.dump( tract_areas, open( "/content/drive/My Drive/areas.p", "wb" ) ) # + id="Njl0f9cQATnY" colab_type="code" colab={} import pickle tract_areas = favorite_color = pickle.load( open( "/content/drive/My Drive/AM216/geospatial_analysis/data/areas.p", "rb" )) # + [markdown] id="QdhEg0XSIPwE" colab_type="text" # Another issue is the images are not the same size, though their largest dimension is 512. For example, some may be 459x512, or 500x512. We thus create a function to pad the images like so: # + id="lFZtG4C_AZt1" colab_type="code" colab={} def pad_images(x): padded_x = np.zeros((512,512,3), dtype=np.uint8) x0 = x if x0.shape[0] != 512: pad_left = (512 - x0.shape[0])//2 pad_right = x0.shape[0]+pad_left padded_x[pad_left:pad_right, :x0.shape[1],:] = x0 if x0.shape[1] != 512: pad_top = (512 - x0.shape[1])//2 pad_bottom = x0.shape[1]+pad_top padded_x[:x0.shape[0], pad_top:pad_bottom,:] = x0 return np.array(padded_x) # + [markdown] id="wFfFV5IFIdqy" colab_type="text" # Here is a sample of the padded images: # + id="DwLN4hJNI0pv" colab_type="code" outputId="a7750053-e181-4dfe-cd52-489c5f9ff373" colab={"base_uri": "https://localhost:8080/", "height": 286} import imageio from tqdm.auto import tqdm import matplotlib.pyplot as plt im = imageio.imread('/content/drive/My Drive/summer_resized/25001010100.png') plt.imshow(im) # + [markdown] id="oVSpOY1VJewp" colab_type="text" # ### Tensorflow Data # Given the size of the images, storing all of them in memory is not feasible. We thus leverage Tensorflow's `ImageDataGenerator` and `flow_from_dataframe` methods, which allow us to use an auxiliary Pandas dataframe containing the filename of the image as well as the population figure. # # First we will create this DataFrame, using CenPy # + id="hgx_BaM6OIj-" colab_type="code" outputId="63aa5aa9-eeb0-462a-bc47-e998429b63a0" colab={"base_uri": "https://localhost:8080/", "height": 120, "referenced_widgets": ["0f3b0c766a04410f9e0d3246b88f482c", "9e9d2fd3955b4d2cb0730282d9a3ab8e", "230035818853492aabd20ebb7123c28a", "61ea164ffc064f2196f44b192e1caf42", "ef2f8d2625614022b852aba708723157", "bfd6abe99b1e41459159803c38ec0622", "8c5ab01583d4452ea6331672b38fb043", "afc364a46c2c492082f46a7b077afba4"]} import numpy as np from tqdm.auto import tqdm import pandas as pd import h5py from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split import pathlib import folium import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Input, GlobalAveragePooling2D from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate from tensorflow.keras.optimizers import Adam, SGD from matplotlib import pyplot as plt import os from tensorflow.python.platform import gfile from tensorflow.python.lib.io import file_io from io import BytesIO data_dir = pathlib.Path('/content/drive/My Drive/summer_resized/') # list_ds = tf.data.Dataset.list_files(str(data_dir/'*/*')) image_count = len(list(data_dir.glob('*.png'))) print(image_count) IMAGE_NAMES = np.array([item.name for item in data_dir.glob('*.png') if item.name != "LICENSE.txt"]) print(IMAGE_NAMES) POPULATION_VALS = [] for filename in tqdm(IMAGE_NAMES): state = filename[:2] county = filename[2:5] tract = filename[5:-4] pop = massachusetts[(massachusetts['state'] == state) & (massachusetts['county'] == county) & (massachusetts['tract'] == tract)].P001001.values[0] 
POPULATION_VALS.append((filename,pop)) pop_df = pd.DataFrame(POPULATION_VALS, columns =['filename', 'population']) # + id="kIH7x9TEZZpg" colab_type="code" colab={} BATCH_SIZE = 16 IMG_HEIGHT = 256 IMG_WIDTH = 256 image_dir = '/content/drive/My Drive/summer_resized' STEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE) # + id="gmb_RU_-xkbn" colab_type="code" outputId="d03a53f8-b221-4b66-ff20-d909e72b5042" colab={"base_uri": "https://localhost:8080/", "height": 125} from google.colab import drive drive.mount('/content/drive') # + [markdown] id="dvKrruAPNmbC" colab_type="text" # Now create the ImageDataGeneator, which will allow us to flow images. # + id="3gt8IBe-JkDJ" colab_type="code" colab={} train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255, validation_split=0.2) # + [markdown] id="0iRQY02gZ-F8" colab_type="text" # Finally, we create the generators for the training and validation. # + id="uYk3lmPTaG2d" colab_type="code" outputId="efdbb4aa-ffec-4c3c-deab-c6e699016384" colab={"base_uri": "https://localhost:8080/", "height": 34} train_generator = train_datagen.flow_from_dataframe(dataframe=pop_df, directory=image_dir, x_col="filename", y_col="population", has_ext=True, class_mode="other", target_size=(IMG_WIDTH, IMG_HEIGHT), shuffle=True, batch_size=BATCH_SIZE,subset='training') # + id="mLMNMj0yaOe8" colab_type="code" outputId="c78a4d75-a4ea-4b69-a353-5e0d1a3f7486" colab={"base_uri": "https://localhost:8080/", "height": 34} validation_generator = train_datagen.flow_from_dataframe(dataframe=pop_df, directory=image_dir, x_col="filename", y_col="population", has_ext=True, class_mode="other", target_size=(IMG_WIDTH, IMG_HEIGHT), batch_size=BATCH_SIZE,subset='validation') # + [markdown] id="mFpeFvVjvX_L" colab_type="text" # # Models # **Note:** The weights for our models are on our github repository, found [here](https://github.com/chickert/geospatial_analysis). # # # + [markdown] id="-z9xNbue4yCk" colab_type="text" # ## FFNN # Our first model was a simple feed-forward neural network. This models flattens the image and then has two dense layers, before ouputting a number using linear activation. Note the summary below # # + id="u9bhXxn7atv8" colab_type="code" outputId="cc91e6a3-80a5-438c-84cd-3d8b17472800" colab={"base_uri": "https://localhost:8080/", "height": 336} input1 = Input(shape=(512,512,3)) flat = Flatten()(input1) dense1 = Dense(32, activation='relu')(flat) dense2 = Dense(32, activation='relu')(dense1) output = Dense(1, activation='linear')(dense2) FFNN = Model(inputs=input1, outputs=output) FFNN.summary() # + id="L23CBZcRzLqY" colab_type="code" colab={} STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size FFNN.compile(loss='mean_squared_error', optimizer='adam') # + [markdown] id="8CI-iRSizjJu" colab_type="text" # Now we'll train it for 10 epochs, using MSE as loss and `adam` as the optimizer. # + id="c7IEXTvrzVJX" colab_type="code" outputId="b0339f29-2020-4d06-a255-9bc7d159a1b4" colab={"base_uri": "https://localhost:8080/", "height": 52} # model_history = FFNN.fit(train_generator, steps_per_epoch=STEP_SIZE_TRAIN,validation_data = validation_generator, validation_steps=STEP_SIZE_VALID, epochs = 10) FFNN.load_weights('/content/drive/My Drive/AM216/geospatial_analysis/models/FFNN.h5') FFNN.evaluate(validation_generator, steps=STEP_SIZE_VALID) # + [markdown] id="fXWFaNgE3x5Q" colab_type="text" # Above we've evlauated this model on our validation dataset. 
The validation loss of ~3752094 is, frankly, bad. To put this in more relatable terms, it means that each population estimate was, on average, off by 1,937 people. We will thus attempt to improve on this with CNNs. # + [markdown] id="XWiQTiAHBbRj" colab_type="text" # ## Simpler CNN Models # # The main motivation behind building simpler, shallower CNN models was to test how well models with convolutional layers performed in comparison to our FFNN model with only dense layers. Generally, convolutional neural networks represent image data better than dense layers because they can better capture the spatial dependencies in an image. # # Note that due to the large amount of data, we found that adding more filters or more layers required more memory than Google Colab could allocate (OOM errors). This is another reason we started by building smaller models than our deep sequential or deep UNet models, and we decided to train them for longer (30 epochs). We show the architecture for these models below. Note: we will run the UNet models on an AWS instance; details are included below. # # Reference source for CNNs: https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53 # + [markdown] id="nfMQelavB0Ob" colab_type="text" # ### CNN Model 1 # # - 2 Conv2D layers with 64 filters (3x3) and ReLU activation # - 2 Conv2D layers with 32 filters (3x3) and ReLU activation # - Batch normalization layers in between Conv2D layers # - Max pooling in between Conv2D layers # - 1 Dense layer with 32 units and ReLU activation # - 1 Dropout layer # - 1 output Dense layer with linear activation # # Note: we experimented with many learning rates and found that 0.0001 gave the best model performance.
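# The sweep itself was not kept in the notebook. The cell below is a minimal, illustrative sketch of how such a learning-rate search can be run against the generators defined above. The small `build_candidate` model is a stand-in, not the CNN Model 1 architecture defined in the next cell, and the training loop is left commented out, like the other long-running cells, because it retrains from scratch.

# +
# Illustrative learning-rate sweep (added sketch): each candidate rate gets a freshly
# built model so runs do not share weights, and we keep the rate with the lowest
# validation loss. Swap build_candidate() for the full architecture to reproduce the
# actual experiments.
def build_candidate():
    m = Sequential()
    m.add(Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
    m.add(Conv2D(32, (3, 3), padding="same", activation='relu'))
    m.add(MaxPooling2D((2, 2), strides=(2, 2)))
    m.add(Flatten())
    m.add(Dense(32, activation='relu'))
    m.add(Dense(1, activation='linear'))
    return m

# sweep_results = {}
# for lr in [1e-3, 1e-4, 1e-5]:
#     candidate = build_candidate()
#     candidate.compile(loss='mean_squared_error', optimizer=Adam(lr))
#     hist = candidate.fit(train_generator,
#                          steps_per_epoch=STEP_SIZE_TRAIN,
#                          validation_data=validation_generator,
#                          validation_steps=STEP_SIZE_VALID,
#                          epochs=3)
#     sweep_results[lr] = min(hist.history['val_loss'])
# print(sweep_results)
# -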
# + id="p7URKIAEBxf3" colab_type="code" outputId="2ebef0a1-8636-4a7f-a8de-288e5f7f2312" colab={"base_uri": "https://localhost:8080/", "height": 638} BATCH_SIZE = 8 IMG_HEIGHT = 512 IMG_WIDTH = 512 STEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE) train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255, validation_split=0.2) train_generator = train_datagen.flow_from_dataframe(dataframe=pop_df, directory=image_dir, x_col="filename", y_col="population", has_ext=True, class_mode="other", target_size=(IMG_WIDTH, IMG_HEIGHT), batch_size=BATCH_SIZE,subset='training') validation_generator = train_datagen.flow_from_dataframe(dataframe=pop_df, directory=image_dir, x_col="filename", y_col="population", has_ext=True, class_mode="other", target_size=(IMG_WIDTH, IMG_HEIGHT), batch_size=BATCH_SIZE,subset='validation') # build model cnn_model1 = Sequential() cnn_model1.add(Input(shape=(512,512,3))) cnn_model1.add(Conv2D(64,(3,3), padding="same", activation='relu')) cnn_model1.add(tf.keras.layers.BatchNormalization()) cnn_model1.add(Conv2D(64,(3,3), padding="same", activation='relu')) cnn_model1.add(MaxPooling2D((2,2), strides=(2,2))) cnn_model1.add(Conv2D(32,(3,3), padding="same", activation='relu')) cnn_model1.add(tf.keras.layers.BatchNormalization()) cnn_model1.add(Conv2D(32,(3,3),padding="same", activation='relu')) cnn_model1.add(MaxPooling2D((2,2), strides=(2,2))) cnn_model1.add(Flatten()) cnn_model1.add(Dense(32, activation='relu')) cnn_model1.add(Dropout(0.3)) cnn_model1.add(Dense(1, activation='linear')) print(cnn_model1.summary()) # build model STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size cnn_model1.compile(loss='mean_squared_error', optimizer=Adam(0.0001)) # + id="o3Hk5WbXGdEf" colab_type="code" colab={} # cnn_model1_history = cnn_model1.fit(train_generator, # steps_per_epoch=STEP_SIZE_TRAIN, # validation_data = validation_generator, # validation_steps=STEP_SIZE_VALID, # epochs = 30) # cnn_model1.save_weights("model3_weights.h5") cnn_model1.load_weights('/content/drive/My Drive/AM216/geospatial_analysis/models/cnn1.h5') cnn_model1.evaluate(validation_generator, steps=STEP_SIZE_VALID) # + [markdown] id="Gp_wZLJqG2Ha" colab_type="text" # Given the simplicity of this model, it performed decently, since its best validation MSE score was 2012269.8333, which implies that the model had an average population error of 1418.54. This is better performance than the FFNN average error, although it can still be improved. We also tried implementing a simpler CNN model with CNN layers that have 32 filters. # + [markdown] id="ie0uzHZAG6Z-" colab_type="text" # ### CNN Model 2 # # - 2 Conv2D layers with filter size = 32, and relu activation # - 1 Batch normalization layer in between Conv2D layers # - 1 Dense layer with 32 filters, relu activation # - 1 Dropout layer # - 1 output dense layer with linear activation # # Note, we experimented with many learning rates and found that 0.00001 resulted in optimal model performance. 
# + id="nCCy5xgTG9LB" colab_type="code" outputId="b43431b8-c63b-4efe-954e-a145254bb43d" colab={"base_uri": "https://localhost:8080/", "height": 425} cnn_model2 = Sequential() cnn_model2.add(Input(shape=(512,512,3))) cnn_model2.add(Conv2D(32,(3,3), padding="same", activation='relu')) cnn_model2.add(tf.keras.layers.BatchNormalization()) cnn_model2.add(Conv2D(32,(3,3), padding="same", activation='relu')) cnn_model2.add(Flatten()) cnn_model2.add(Dense(32, activation='relu')) cnn_model2.add(Dropout(0.3)) cnn_model2.add(Dense(1, activation='linear')) print(cnn_model2.summary()) # compile STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size cnn_model2.compile(loss='mean_squared_error', optimizer=Adam(0.00001)) # + id="RPmvd66aHAEh" colab_type="code" colab={} # # fit # # cnn_model2_history = cnn_model2.fit(train_generator, # # steps_per_epoch=STEP_SIZE_TRAIN, # # validation_data = validation_generator, # # validation_steps=STEP_SIZE_VALID, # # epochs = 30) # # cnn_model2.save_weights("model4_weights.h5") cnn_model2.load_weights('/content/drive/My Drive/AM216/geospatial_analysis/models/cnn2.h5') cnn_model2.evaluate(validation_generator, steps=STEP_SIZE_VALID) # + [markdown] id="v0cLUQVmHG1H" colab_type="text" # We also tried to decrease filtersize, and run for many epochs to see if this would improve performance. The lowest validation MSE loss achieved was 2531170.9358, which means the model had an average population error of 1590.96. This did not perform as well as model3, but it still is better than the FFNN. We still would like to improve performance by experimenting with different architectures and depths, so we decided to move our model fitting to AWS that allocates enough memory and speeds up performance of our models. # + [markdown] id="Cqmm9B4BZOFi" colab_type="text" # ## Deep Sequential CNN # # Given the improvement in performance we often see by deepening networks that are already partially successful, we investigated the success of deeper sequential CNNs. This investigation introduced new issues. The Google Colab notebook sessions were liable to crash due to out-of-memory (OOM) errors, and even when they did not crash, the training was exceptionally slow. # # To address this, we migrated the code to AWS EC2 virtual machines, using the p3.2xlarge instance and the Anaconda environments that come pre-installed with the Deep Learning Amazon Machine Image. These instances use Tesla V100 GPUs, which come with 16GB of RAM and were sufficient to prevent OOM issues. They also trained the models much more quickly, allowing us to iterate on different design features more quickly for our comparisons. # # # This model achieved a validation loss as low as ~2.16mn, which amounts to miscounting the population of a census tract by an average of 1,469 people. Further motivation behind our specific architecture can be found [here](http://web.stanford.edu/class/archive/cs/cs221/cs221.1192/2018/restricted/posters/kfuhs/poster.pdf). 
# # # # + id="_h6Mpgy_ZLrb" colab_type="code" outputId="46fb0ee2-e097-4ec5-d04a-2207ba1bd9e8" colab={"base_uri": "https://localhost:8080/", "height": 816} print("building model") deep_model = Sequential() deep_model.add(Input(shape=(512,512,3))) deep_model.add(Conv2D(64,(3,3), padding="same", activation='relu')) deep_model.add(Conv2D(64,(3,3), padding="same", activation='relu')) deep_model.add(MaxPooling2D((2,2), strides=(2,2))) deep_model.add(Conv2D(128,(3,3), padding="same", activation='relu')) deep_model.add(Conv2D(128,(3,3),padding="same", activation='relu')) deep_model.add(MaxPooling2D((2,2), strides=(2,2))) deep_model.add(Conv2D(256,(3,3), padding="same", activation='relu')) deep_model.add(Conv2D(256,(3,3),padding="same", activation='relu')) deep_model.add(UpSampling2D((2,2))) deep_model.add(Conv2D(128,(3,3), padding="same", activation='relu')) deep_model.add(Conv2D(128,(3,3),padding="same", activation='relu')) deep_model.add(UpSampling2D((2,2))) deep_model.add(Conv2D(64,(3,3), padding="same", activation='relu')) deep_model.add(Conv2D(64,(3,3),padding="same", activation='relu')) deep_model.add(Conv2D(1,(1,1),padding="same", activation='linear')) deep_model.add(Flatten()) deep_model.add(Dropout(0.5)) deep_model.add(Dense(1, activation='linear')) deep_model.summary() STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size optimizer = Adam() deep_model.compile(loss='mean_squared_error', optimizer=optimizer) # fit # model_history = deep_model.fit(train_generator, # steps_per_epoch=STEP_SIZE_TRAIN, # validation_data = validation_generator, # validation_steps=STEP_SIZE_VALID, # epochs = 12) # deep_model.save_weights("./deepseqcnn_weights.h5") # + id="EylJEtUrZfFI" colab_type="code" colab={} deep_model.load_weights("./deepseqcnn_weights.h5") # + [markdown] id="7DIzC9XRabSY" colab_type="text" # ## UNet Model # # To see if we could improve upon our result using the deep sequential CNN with the learning rate scheduler, we next experimented with the UNet model. This model was initially created for image segmentation problems, where it outputs a complete high resolution image. In contrast to the deep sequential model above, UNet's decoder contains skip connection layers, which combine the output of feature maps of the encoder with previous convolution layers. We can tailor # this model to our problem, where the model can potentially classify residential areas vs non-residential areas these satellite images. Then, because our problem is a regression, # we added layers afterwards to output a parameter representing population count. To reduce problem size and overfitting, we also added a dropout layer in between the two dense layers. More information on the [original UNet paper](https://arxiv.org/pdf/1505.04597.pdf) for reference, and further motivation behind building this model can be found [here](http://web.stanford.edu/class/archive/cs/cs221/cs221.1192/2018/restricted/posters/kfuhs/poster.pdf). # # This resulted in a validation loss as low as ~2.28mn, which amounts to miscounting population by an average of 1,509 per census tract. 
# # # + id="i9XkC2eHaeZD" colab_type="code" outputId="0bbe4c5a-6b5e-41c4-f2d3-66ee1559e0c0" colab={"base_uri": "https://localhost:8080/", "height": 976} print("building model") input_feat = Input(shape=(512,512,3)) conv_layer1 = Conv2D(64,(3,3), padding="same", activation='relu')(input_feat) conv_layer2 = Conv2D(64,(3,3), padding="same", activation='relu')(conv_layer1) max_pool1 = MaxPooling2D((2,2), strides=(2,2))(conv_layer1) conv_layer3 = Conv2D(128,(3,3), padding="same", activation='relu')(max_pool1) conv_layer4 = Conv2D(128,(3,3), padding="same", activation='relu')(conv_layer3) max_pool2 = MaxPooling2D((2,2), strides=(2,2))(conv_layer4) conv_layer5 = Conv2D(256,(3,3), padding="same", activation='relu')(max_pool2) conv_layer6 = Conv2D(256,(3,3),padding="same", activation='relu')(conv_layer5) # upsampling and concatenating upsample1 = UpSampling2D((2,2))(conv_layer6) concat1 = Concatenate()([upsample1, conv_layer4]) conv_layer7 = Conv2D(128,(3,3), padding="same", activation='relu')(concat1) conv_layer8 = Conv2D(128,(3,3),padding="same", activation='relu')(conv_layer7) upsample2 = UpSampling2D((2,2))(conv_layer8) concat2 = Concatenate()([upsample2, conv_layer2]) conv_layer9 = Conv2D(64,(3,3), padding="same", activation='relu')(concat2) conv_layer10 = Conv2D(64,(3,3),padding="same", activation='relu')(conv_layer9) output = Conv2D(1,(1,1),padding="same", activation='linear')(conv_layer10) reshape_layer1 = Flatten()(output) reshape_layer2 = Dropout(0.3)(reshape_layer1) final_output = Dense(1, activation='linear')(reshape_layer2) # build model unet_model = tf.keras.Model(input_feat, final_output) unet_model.summary() STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size unet_model.compile(loss='mean_squared_error', optimizer='adam') # fit print("training...") # model_history = unet_model.fit(train_generator, # steps_per_epoch=STEP_SIZE_TRAIN, # validation_data = validation_generator, # validation_steps=STEP_SIZE_VALID, # epochs = 12) # unet_model.save_weights("./unet_weights.h5") unet_model.load_weights("./unet_weights.h5") # + [markdown] id="e8jULD1Vamym" colab_type="text" # ## UNet Model with Dense Layers # # The UNet archictecture was originally designed for image segmentation with the ultimate purpose of classification. However, since our use-case is a regression problem, we experimented with adding several dense layers (in addition to the one in the model above necessary to output a population estimate) after the convolutional segment of the network. # # However, we found that this did not improve performance. The model architecture is defined below, and only drops the validation loss as low as ~2.62mn, which amounts to miscounting the population by 1,618 heads per census tract. 
# # + id="X-aq8MNDaris" colab_type="code" outputId="3ca9a71b-7e97-49ca-e10f-a0f8158a7a94" colab={"base_uri": "https://localhost:8080/", "height": 1000} print("building model") input_feat = Input(shape=(512,512,3)) conv_layer1 = Conv2D(64,(3,3), padding="same", activation='relu')(input_feat) conv_layer2 = Conv2D(64,(3,3), padding="same", activation='relu')(conv_layer1) max_pool1 = MaxPooling2D((2,2), strides=(2,2))(conv_layer1) conv_layer3 = Conv2D(128,(3,3), padding="same", activation='relu')(max_pool1) conv_layer4 = Conv2D(128,(3,3), padding="same", activation='relu')(conv_layer3) max_pool2 = MaxPooling2D((2,2), strides=(2,2))(conv_layer4) conv_layer5 = Conv2D(256,(3,3), padding="same", activation='relu')(max_pool2) conv_layer6 = Conv2D(256,(3,3),padding="same", activation='relu')(conv_layer5) # upsampling and concatenating upsample1 = UpSampling2D((2,2))(conv_layer6) concat1 = Concatenate()([upsample1, conv_layer4]) conv_layer7 = Conv2D(128,(3,3), padding="same", activation='relu')(concat1) conv_layer8 = Conv2D(128,(3,3),padding="same", activation='relu')(conv_layer7) upsample2 = UpSampling2D((2,2))(conv_layer8) concat2 = Concatenate()([upsample2, conv_layer2]) conv_layer9 = Conv2D(64,(3,3), padding="same", activation='relu')(concat2) conv_layer10 = Conv2D(64,(3,3),padding="same", activation='relu')(conv_layer9) output = Conv2D(1,(1,1),padding="same", activation='linear')(conv_layer10) added_layer1 = Flatten()(output) added_layer2 = Dropout(0.3)(added_layer1) added_layer3 = Dense(256, activation='relu')(added_layer2) added_layer4 = Dropout(0.3)(added_layer3) added_layer5 = Dense(64, activation='relu')(added_layer4) added_layer6 = Dropout(0.3)(added_layer5) final_output = Dense(1, activation='linear')(added_layer6) # build model unet_model = tf.keras.Model(input_feat, final_output) unet_model.summary() STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size unet_model.compile(loss='mean_squared_error', optimizer='adam') # fit # print("training...") # model_history = unet_model.fit(train_generator, # steps_per_epoch=STEP_SIZE_TRAIN, # validation_data = validation_generator, # validation_steps=STEP_SIZE_VALID, # epochs = 11) # unet_model.save_weights("./unetwdenselayers_weights.h5") unet_model.load_weights("./unetwdenselayers_weights.h5") # + [markdown] id="XO6RkalU57eb" colab_type="text" # ## Transfer Learning # While the task of population prediction is relatively distinct from any sort of classification task, we created a model build on top of MobileNet with a few dense layers added to the end before the linear activation layer. More information about MobileNet is availible here: https://arxiv.org/abs/1704.04861. Considering this was a last-ditch effort, this model was a pleasant surprise. Below I define the model and evaluate it on our validation dataset. # + id="zGpl6_4VzlnG" colab_type="code" outputId="484122c3-94b9-4a69-d936-ed15e164f701" colab={"base_uri": "https://localhost:8080/", "height": 1000} from tensorflow.keras.applications import MobileNet base_model=MobileNet(weights='imagenet',include_top=False, input_shape=(256,256,3)) #imports the mobilenet model and discards the last 1000 neuron layer. x=base_model.output x=GlobalAveragePooling2D()(x) x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results. 
x=Dense(1024,activation='relu')(x) #dense layer 2 x=Dense(512,activation='relu')(x) #dense layer 3 preds=Dense(1,activation='linear')(x) transfer_model = Model(base_model.input, preds) transfer_model.summary() # + [markdown] id="5TSPO5HvD4Kh" colab_type="text" # Now we evaluate it on the validation set. # + id="1gcqd5ZsD_HH" colab_type="code" outputId="790c7324-c23a-4ed5-ad2e-42eb301ca9fd" colab={"base_uri": "https://localhost:8080/", "height": 260} STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size transfer_model.compile(loss='mean_squared_error', optimizer='adam') transfer_model.load_weights('/content/drive/My Drive/AM216/geospatial_analysis/models/trans.h5') # model_history = transfer_model.fit(train_generator, steps_per_epoch=STEP_SIZE_TRAIN,validation_data = validation_generator, validation_steps=STEP_SIZE_VALID, epochs = 10) transfer_model.evaluate(validation_generator, steps=STEP_SIZE_VALID) # + [markdown] id="nOqugRSC14w1" colab_type="text" # As you can see above, the validation loss of ~1871448 equates to an average population error of about 1,368 people. While certainly not perfect, this is a marked improvement over many of our other models. This is a testament to the power of transfer learning. # + [markdown] id="F-o_5D3CuPk8" colab_type="text" # # Diving into our best model # Let's quickly take a look at the outcomes of our predictions, using the Transfer Learning model. # # Here's the loss history plot for our model. It suggests that a high degree of overfitting is going on. # # + id="FRQIeEDK0UI9" colab_type="code" outputId="81e2a156-161f-43d5-c7dd-b233e580f5be" colab={"base_uri": "https://localhost:8080/", "height": 286} # import imageio from tqdm.auto import tqdm import matplotlib.pyplot as plt im = imageio.imread('/content/drive/My Drive/AM216/geospatial_analysis/history.png') plt.imshow(im) # + id="aco4nvPEujSd" colab_type="code" outputId="0390dcdd-96e8-4c42-99c0-9df5bde181ff" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["78cad7b29fe8413e9f61fe8de0d7b39a", "9124e0d7aaf4416ea5a6feeb254ba6f0", "28fcc93993f548c5910b66a1cc0d058c", "81ead2f87d8247f49a9b243256654e0d", "e8554fd887bf40c1b721494a1ed55c7f", "cd9bc1b656124c89b021ce139a2110bd", "2fb40b80285a4285b3339a484c8f913e", "8c444a4291b843f4940963b2d26274b5"]} # scan a few batches from the generator for the single worst prediction max_diff = 0; real_pop = 0 predicted_pop = 0 for i in tqdm(range(18)): x,y = next(train_generator) preds = transfer_model.predict(x) diff = np.abs(preds.flatten() - y) local_max = np.max(diff) if local_max > max_diff: max_diff = local_max real_pop = y[np.argmax(diff)] predicted_pop = preds[np.argmax(diff)] # + id="maEvBV1V1hCk" colab_type="code" outputId="abbc1db5-23a0-45a4-d804-fca4b7a61085" colab={"base_uri": "https://localhost:8080/", "height": 77} # look up the worst-predicted tract by its true population, found above pop_df[pop_df.population == 6652.0] # + id="yMQobmj31zeQ" colab_type="code" outputId="2f5c2b06-414d-4d47-ebbb-ffbeb3982052" colab={"base_uri": "https://localhost:8080/", "height": 286} im = imageio.imread('/content/drive/My Drive/summer_resized/25017340100.png') plt.imshow(im) # + [markdown] id="MRpQZQkR_BNx" colab_type="text" # Here's the tract that our model performed the worst on. It looks like a large, suburban neighborhood. Perhaps the large amount of green space confused the model; with a larger dataset and/or higher-resolution photos, the model might behave better.
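# Before turning to the approaches that did not pan out, a small added summary: the per-tract "average error" figures quoted throughout are simply the square root of each model's best validation MSE (the RMSE). The dictionary below just collects the validation losses reported above.

# +
# (Added) Convert each reported validation MSE into an average per-tract population
# error for an easier side-by-side comparison of the models.
reported_val_mse = {
    'FFNN': 3752094,
    'CNN Model 1': 2012269.8,
    'CNN Model 2': 2531170.9,
    'Deep sequential CNN': 2.16e6,
    'UNet': 2.28e6,
    'UNet + dense layers': 2.62e6,
    'MobileNet transfer model': 1871448,
}
for name, mse in reported_val_mse.items():
    print(f"{name:>24s}: ~{mse ** 0.5:,.0f} people per tract")
# -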
# + [markdown] id="dc5M-yPW5BHD" colab_type="text" # # Unsuccessful Forays # We made a number of attempts to improve the performance of our models. # + [markdown] id="gfFXM8dD5UAF" colab_type="text" # ## Adding area of tract as a second input # We thought that perhaps an issue with our data was the fact that each image of a tract was the same size, despite tracts ranging widely in size. Thus we created a new data generator which combined the image as well as the area of the tract. Below is the code for creating such a generator. # # # + id="CvW3C1qG5ONY" colab_type="code" colab={} train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255, validation_split=0.2) def combined_generator(subset, width, height, batchsize): img_gen = train_datagen.flow_from_dataframe(dataframe=pop_df, directory=image_dir, x_col="filename", y_col="population", has_ext=True, class_mode="other", target_size=(width, height), batch_size=batchsize,subset=subset, shuffle=True) while True: idx = (img_gen.batch_index) * img_gen.batch_size filenames = img_gen.filenames[idx : idx + img_gen.batch_size] area_list = pop_df[pop_df['filename'].isin(filenames)].area.values X1 = next(img_gen) yield ([X1[0],area_list], X1[1]) train_generator = combined_generator(subset='training', width=IMG_WIDTH, height=IMG_HEIGHT, batchsize=BATCH_SIZE) validation_generator = combined_generator(subset='validation', width=IMG_WIDTH, height=IMG_HEIGHT, batchsize=BATCH_SIZE) # + [markdown] id="F9mnrx5lpzoH" colab_type="text" # ## Adding a learning rate scheduler to fit the models # # We experimented with adding a learning rate scheduler to improver model performance. A learning scheduler audo-adjusts the learninng rate per epoch to performance at each epoch of training. For us, the learning rate scheduler started with a train learning rate of 0.001, and validation learning rate of 0.0001. we found that although it improved train MSE, it did not improve validation MSE at the same rate, resulting in overfitting. As a result, we left it out of our final models. You can incorporate a learning rate scheduler as demonstrated below. # + id="lC-3e-Kjp0Sx" colab_type="code" colab={} # functions for learning rate scheduler def get_lr_metric(optimizer): # to print out learning rate during model fitting def lr(y_true, y_pred): return optimizer.lr return lr def scheduler(epoch): # exponential decay of learning rate initial_lrate = 0.0001 if epoch < 5: lrate = initial_lrate else: lrate = initial_lrate*math.exp(0.1*(5-epoch)) return lrate # compile desired model optimizer = Adam() lr_metric = get_lr_metric(optimizer) unet_model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy', lr_metric]) # Define learning rate scheduler lrate = LearningRateScheduler(scheduler) callbacks_list = [lrate] # Fit unet_model_history = unet_model.fit(train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data = validation_generator, validation_steps=STEP_SIZE_VALID, callbacks=callbacks_list, epochs=15) # + [markdown] id="TT8SHIl05yj0" colab_type="text" # # Discussion of Results # ### Potential Pitfalls # A number of reasons may have hindered us. The highest resolution satellite image data is not free, and has more than 10x the resolution of the data that we used. In using this DigitalGlobe data, we could have conducted our analyses at the smaller "census block" level. Perhaps this would have been more successful, and the models would have learned more abou the nature of human habitation. 
# # Given the time it took to train our models, we limited ourselves to data from Massachusetts, as opposed to the entire United States. It would have been more interesting to see how our approach fared on a broader dataset, and perhaps our models would have done better in that case, as there is only so much a model can learn from fewer than 2000 total images. # # Hence, computational power was a significant limitation, and though we attempted to get around it by spinning up GPU EC2 instances as opposed to training within Colab, we still did not have access to a large, multi-GPU instance, which could have allowed us to look at more data. # # Finally, there are the foundational difficulties that come with the problem of predicting populations based on landscape images alone. For example, intuitively we might think of buildings as correlating with higher population density. This may be true in many places such as suburban neighborhoods or areas with dense apartment complex clusters, but financial districts in city centers may, on the contrary, be replete with buildings yet have near-zero population counts in the census. Put another way, what features readily distinguish an apartment skyscraper from a commercial office skyscraper? # # ### FFNN # We created a FFNN with only a few dense layers as a baseline model to compare our other prediction results to. We found that the model had a poor validation loss of ~3752094; in other words, each estimate of population was, on average, off by 1,937 people. We anticipated that this model would perform poorly, as it was not tweaked for optimal performance. # # ### Simple CNN models # # We found that the CNN models with fewer layers were able to predict population with a lower average error per census tract than the FFNN (average error of ~1500 people). We were surprised that the average error wasn't too far off, given the simple CNN architecture and the fact that detecting highly populated areas purely from satellite images is relatively difficult. There were a decent number of satellite images without much population (e.g. western MA), so we think the models could more easily tell the difference between populated and unpopulated areas. We think the CNN models with fewer layers hit a limit on how accurately they could predict (they could not achieve an average population error below ~1500 people), and so these models were limited. # # ### Deep Sequential Model and UNet Models # # The deep sequential and UNet models both consisted of more layers. The difference between the two is that the UNet model's decoder contains skip connection layers, which combine the output feature maps of the encoder with previous convolution layers. We were disappointed to see that these deeper models did not improve on the performance of the simpler models, again stalling around the same ~1500-person limit described for the shallower models. # # ### Transfer Learning Model # # The transfer learning model, built on top of MobileNet, was the most performant of our models. It should be noted that even this peak performance still amounts to an average error of roughly 1,400 people per tract, which still isn't great. However, this demonstrates the wide applicability of transfer learning, even when the target task is not classification.
# # # # Conclusion # Overall through this project, we were able to explore the GEE database, build a data pipeline to obtain the satellite images, perform data preprocssing to pad the satellite images and combine demographic data (from CenPy), and build multiple neural network models to predict population. To conclude, we were successful in building a pipeline and models that analyzed and predicted based off of satellite imagery. However, we found that training on ~1400 images was not enough for the model to learn weights that could accurately predict population. We also found that the quality of images affects performance, and that a more complex architecture (UNet model) does not necessarily outperform a simpler architecture (simple CNN models). Regardless of our models not performing incredibly well, we were able to experiment with different models and learn how to interact with GEE. We found the inverse problem of learning population from satellite images to be an interesting, challenging, and rewarding task to solve. # # + id="1_CGW9tc5vQS" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Field Integration Techniques # # Many analysis techniques for vector fields require solving an initial value problem for an arbitrary set of seed points and evaluating such solutions at a chosen resolution. Kamodo makes it easy to generate fieldline solutions by providing a function decorator that wraps scipy's powerful `solve_ivp` function. Each family of solutions is represented by a single function of a complex parameter. We illustrate the flexibility of this approach in the example below. # + # initialize from plotly.offline import iplot, plot, init_notebook_mode # init_notebook_mode(connected = True) from kamodo import Kamodo, event, pointlike, kamodofy, solve import numpy as np import pandas as pd # - # ## Dipole field model # # We use the following dipole field model that can accept (m,) and (1,m), and (n,m) arrays. # + def Bdip(rvec): """Need math to work in a variety of arg shapes""" muvec = Bdip.muvec r = np.linalg.norm(rvec, axis = 1) r[r==0] = np.nan try: rhat = rvec/r except: rhat = (rvec.T/r).T try: result = 3*np.dot(rhat, muvec.T) except: result = 3*np.dot(rhat.T, muvec.T).T result = (rhat.T*result).T try: result = result - muvec except: result = (result - muvec.T).T try: result = result/r**3 except: result = (result.T/r**3).T return result # set dipole moment Bdip.muvec = np.array([0, 0, -1]) # pointlike enforces dimensionality Bdip = pointlike(Bdip, '(n,m)->(n,m)', [np.float], squeeze = 0) # - kamodo = Kamodo() kamodo['Bvec'] = Bdip # register the dipole field kamodo # ## Normalization # # Instead of solving the initial value problem on the original field, we will be solving on the normalized field. This will mean that the integral path is the same as the arclength, allowing us to control the visual fidelity of the resulting field. 
# Create a normalization function to be applied to our field # + @kamodofy(equation = "$$\\hat{n}(\\vec{y}) = \\vec{y}/\\sqrt{\\vec{y} \\cdot \\vec{y}} $$") @pointlike(signature = '(m,n)->(m,n)', squeeze = 0) def normalized(yvec): r = np.linalg.norm(yvec, axis = 1) r[r==0] = np.nan try: return yvec/r except: return (yvec.T/r).T kamodo['nhat'] = normalized # - # Create a normalized field kamodo['bhat'] = "nhat(Bvec)" kamodo # ## Solving the initial value problem # Generate a set of seed points for integration # + x0 = np.linspace(-np.pi,np.pi,6) y0 = np.linspace(-np.pi,np.pi,6) z0 = 1 seeds = np.array(np.column_stack([c.ravel() for c in np.meshgrid(x0,y0,z0)])) # - # Create a stopping boundary for field line integrator @event def boundary(s, rvec): r = np.linalg.norm(rvec) if np.isnan(r): result = 0 else: result = r - 1 return result # Solve the initial value problem for the normalized field kamodo['svec'] = solve(kamodo.bhat, # the field to be solved seeds, # the initial positions 's', # the name of the integration parameter (0,30), # the span to integrate over npoints = 60, # the number of points to evaluate the solution events = boundary, # stop at the boundary ) kamodo # The solver returns a family of solutions, represented as a single function of a complex array, $\vec{s}(s)$ where # $s$ is a complex array. # ## Evaluating the Solutions # On evaluation, $\vec{s}(s)$ returns a pandas dataframe. kamodo.svec().head() # When using the default argument above, the solution evaluates at a resolution of npoints/span, stopping at the boundary. # ## Complex parameterization # Kamodo represents the family of solutions to the initial value problem as a single function of a complex array. # # The floor of the real part of the input parameter corresponds to the original seed array: kamodo.svec([0,1,2]).values # compare with original seeds: seeds[[0,1,2]] # The imaginary part denotes the integral along the corresponding solution. Here, we can choose evaluation points that were not in the original solution. Parameters outside the original span will be extrapolated. kamodo.svec([-6j, -5j, 0, 5j, 6j, 4 + 4j, 4 -5.777j]) # ## Plotting Fieldlines # # We can quickly generate plots for all fieldlines at the default resolution by calling plot with the name of the fieldlines solution. import plotly.io as pio fig = kamodo.plot('svec') pio.write_image(fig, 'images/fieldlines.svg') # ![images/fieldlines.svg](images/fieldlines.svg) # To show the direction of the field at each point, we can evaluate $\hat{B}(\vec{s}(s))$ fig = kamodo.plot('svec', Bhat = dict(rvec = kamodo.svec())) pio.write_image(fig,'images/fieldlines_vectors.svg') # ![fieldlines](images/fieldlines_vectors.svg) # ## Integration totals # To compute the total integral for each fieldline individually, we need a function to subtract the integration results at the endpoints. def integral(fieldline): endpoints = fieldline.reset_index().integral.iloc[[0,-1]] return endpoints.values[-1] - endpoints.values[0] # + totals = [] for seed, fieldline in kamodo.svec().groupby(level = 'seed'): totals.append(integral(fieldline)) totals[:5] # - # Alternatively, we can use pandas' aggregation methods to apply our function on each fieldline. 
kamodo.svec().groupby(level='seed').aggregate(integral) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} from tensorflow.examples.tutorials.mnist import input_data from PIL import Image import matplotlib.image import os # + pycharm={"name": "#%%\n", "is_executing": false} mnist = input_data.read_data_sets("MNIST_data", one_hot=True) # + pycharm={"name": "#%%\n", "is_executing": false} save_dir = "MNIST_data/raw/" if os.path.exists(save_dir) is False: os.makedirs(save_dir) # + pycharm={"name": "#%%\n", "is_executing": false} # Save the first 20 images for i in range(20): image_array = mnist.train.images[i, :].reshape(28, 28) filename = save_dir+'mnist_train_%d.jpg' % i matplotlib.image.imsave(filename, image_array, vmin=0.0, vmax=1.0) # + pycharm={"name": "#%%\n"} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Cutting Semantic Corners! # This work examines several measures: # - Average word frequency # - Average word concreteness # - Average word depth in WordNet import pandas as pd import numpy as np import matplotlib.pyplot as plt import math from scipy import stats from nltk.corpus import wordnet as wn #We already have a processed file address = "/Users/yuribizzoni/Downloads/ttr__lexdens_nouns_PLM.xlsx" ttr_file = pd.read_excel(address) #ttr_file len(ttr_file["ORG_WR_EN_lem"]) # + #ttr_file # - #Concreteness - we use a dictionary eng_co = pd.read_excel("/Users/yuribizzoni/Downloads/Translation/B7/tools/Concreteness_ratings_Brysbaert_et_al_BRM.xlsx") # column names list(eng_co) #eng_co #this dictionary contains almost 40000 different words len(eng_co["Word"]) #We make it into a dictionary of concreteness scores subset = eng_co[['Word', 'Conc.M']] conc_values = dict(subset.get_values()) # + ## computing concreteness measures categories = "ORG_WR_EN,TR_DE_EN,ORG_SP_EN,SI_DE_EN".split(",") overlaps = [] fr_subsets = [] mean_concreteness_scores = [] mean_concreteness_scores_frequency = [] percen_very_concretes = [] percen_very_abstracts = [] con_fre_sco = [] c=0 for category in categories: print(category) subset_fr = ttr_file[[category+"_lem", "Anzahl_"+str(c)]] c+=1 fr_values = dict(subset_fr.get_values()) print(len(fr_values.keys())) fr_subsets.append((category,fr_values)) prv, cons, fres = [],[], [] very_con, very_abs = [],[] for w in fr_values.keys(): if w in conc_values.keys(): conc_score = conc_values[w] fre = fr_values[w] if fre>-1: # we add a word's conc.
score proportionally to the number of times it occurs in the corpus # this allows us to perform a real t-test later on prv+=[conc_score]*int(fre) cons.append(conc_score) fres.append(fre) if conc_score>4: very_con.append(conc_score) # very concrete set (concreteness>4) if conc_score<2: very_abs.append(conc_score) # very abstract set (concreteness<2) print("Mean concreteness score: ",np.mean(cons), "Std concreteness score: ",np.std(cons)) n_words = len(fr_values.keys()) mean_concreteness_scores_frequency.append(np.mean(prv)) mean_concreteness_scores.append(np.mean(cons)) percen_very_concretes.append(len(very_con)/n_words) percen_very_abstracts.append(len(very_abs)/n_words) con_fre_sco.append(prv) print("\n") # - for i in range(4): print(categories[i]) print("Percentage very abstract words: "+str(percen_very_abstracts[i])) print("Percentage very concrete words: "+str(percen_very_concretes[i])) print("Mean type concreteness score: "+str(mean_concreteness_scores[i])) print("Mean token concreteness score: "+str(mean_concreteness_scores_frequency[i])) print("\n") # + plt.figure(figsize=(20,10)) N = 4 OVERALL_BAR_WIDTH = .35 SENTIMENT_Y_LABEL2 = 'Percent' SENTIMENT_TITLE2 = 'Category and concreteness' abstra = [x for x in percen_very_abstracts] concre = [x for x in percen_very_concretes] labs = categories ind = np.arange(N) width = OVERALL_BAR_WIDTH #p2 = plt.bar(ind, neu, width) p3 = plt.bar(ind, concre, width, bottom=0) p1 = plt.bar(ind, abstra, width, bottom=concre) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') #plt.legend((p1[0], p2[0], p3[0]), ('Negative', 'Neutral', 'Positive')) plt.legend((p1[0], p3[0]), ('Very abstract (>4/5)', 'Very concrete (<2/5)')) #plt.savefig("/Users/yuribizzoni/abstract_concrete_difference.png") plt.show() # + N = len(mean_concreteness_scores_frequency) SENTIMENT_Y_LABEL2 = "Concreteness > 3.1" SENTIMENT_TITLE2 = 'Mean concreteness, frequency biased' plt.figure(figsize=(15,8)) plt.bar(np.arange(N), [x-3.1 for x in mean_concreteness_scores_frequency]) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') #plt.yticks(ticks=[.3,.35,.4]) #plt.savefig("/Users/yuribizzoni/mean_concreteness_freqbiased.png") plt.show() # + #Differences are small. Are they statistically significant? 
# + #T test comparing each concreteness distribution with each other #(p<.05 means statistically significant) # con_fre_sco: concreteness frequency scores for each word in a category for array in con_fre_sco: for other_array in con_fre_sco: if array != other_array: print(stats.ttest_ind(array, other_array)) # + #subset_fr # + #Other analyses #subset_fr # + #High frequency vs low frequency words hig_perces, low_perces = [],[] hig_low_ratio = [] for category in categories: print(category) subset_fr = ttr_file[[category+"_lem", category+"_fre"]] fr_values = dict(subset_fr.get_values()) high_counts = [v for v in fr_values.values() if v>=.85] # all_counts = [v for v in fr_values.values() if v>-1] print("mean frequency", np.mean(all_counts)) low_counts = [v for v in fr_values.values() if v<0.05] # hig_per = len(high_counts)/len(all_counts) low_per = len(low_counts)/len(all_counts) print("percentage low frequency words: ", low_per) print("percentage high frequency words: ", hig_per) hig_perces.append(hig_per) low_perces.append(low_per) high_low_r = len(high_counts)/len(low_counts) print("High to low frequency words ratio: ", high_low_r) hig_low_ratio.append(high_low_r) print("\n") # + SENTIMENT_Y_LABEL2 = "Ratio" SENTIMENT_TITLE2 = 'High/Low frequency words ratio' plt.bar(np.arange(N), [x for x in hig_low_ratio]) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') #plt.savefig("/Users/yuribizzoni/frequency_ratio") plt.show() # + #WordNet depth_of_frequents = [] depth_of_rares = [] avg_depth = [] depths_arrays = [] #extra = open("/Users/yuribizzoni/Downloads/B7/WN_depth_English.csv","w") #extra.write("CORPUS\tWN_DEPTH\tWN_DEPTH_calibrated\tFREQUENCY\n") c=0 for category in categories: subset_fr = ttr_file[[category+"_lem", category+"_fre"]] c+=1 fr_values = dict(subset_fr.get_values()) fr_subsets.append((category,fr_values)) prv, frequents, rares = [],[],[] for w in fr_values.keys(): w = str(w) syn = wn.synsets(w) # we take all synsets containing that word if len(syn)>0 and w!='nan': fre = fr_values[w] # frequency of the word # we take the mean depth of the synsets containing the word dep_ca = np.mean([s.max_depth() for s in syn][-1]) tot = dep_ca if fre>-1: # as before, we add a proportial number of scores # in order to compute a t-test later prv+=[tot]*int(fre*100) ###WE ARE USING MPF FREQ, so it is necessary to *100 or more if fre>=1: frequents+=[tot]*int(fre*100) if fre<.1: rares+=[tot]*int(fre*100) depths_arrays.append(frequents) avg_depth.append(np.mean(prv)) depth_of_frequents.append(np.mean(frequents)) depth_of_rares.append(np.mean(rares)) print("done") # - syn[0].max_depth() depth_of_frequents depth_of_rares # + SENTIMENT_Y_LABEL2 = "Mean depth (-5.5)" SENTIMENT_TITLE2 = 'Depth of frequent words' plt.bar(np.arange(N), [(x-5.5) for x in depth_of_frequents]) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') #plt.savefig("/Users/yuribizzoni/depfreq.png", bbox_inches="tight") plt.show() # + SENTIMENT_Y_LABEL2 = "Mean depth (-5.5)" SENTIMENT_TITLE2 = 'Depth of rare words' plt.bar(np.arange(N), [x-5.5 for x in depth_of_rares]) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') #plt.savefig("/Users/yuribizzoni/deprare.png", bbox_inches="tight") plt.show() # + SENTIMENT_Y_LABEL2 = "Mean depth (-5.5)" SENTIMENT_TITLE2 = 'Depth of words' plt.bar(np.arange(N), [x-5 for x in avg_depth]) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) 
plt.xticks(ind, labs, rotation='vertical') plt.show() # - [1] # + ##t test. This time, some contrasts are significant, others are not. # Some are close to the threshold (e.g. .08), others are far away. for i in range(len(depths_arrays)): array = depths_arrays[i] for j in range(len(depths_arrays)): other_array = depths_arrays[j] if array != other_array: ttest = stats.ttest_ind(array, other_array) print(labs[i], '->', labs[j], ttest) if ttest[1]<.05: print("<<< Significant\n") # - # + plt.figure(figsize=(20,10)) OVERALL_BAR_WIDTH = .35 SENTIMENT_Y_LABEL2 = 'Avg Depth' SENTIMENT_TITLE2 = 'Category and WN depth' N = len(depth_of_frequents) avg = [x for x in avg_depth] freq = [x for x in depth_of_frequents] rare = [x for x in depth_of_rares] labs = categories #sorted([x[0] for x in sent]) ind = np.arange(N) width = OVERALL_BAR_WIDTH p3 = plt.bar(ind, freq, width, bottom=rare) p1 = plt.bar(ind, rare, width, bottom=avg) p2 = plt.bar(ind, avg, width, bottom=0) plt.ylabel(SENTIMENT_Y_LABEL2) plt.title(SENTIMENT_TITLE2) plt.xticks(ind, labs, rotation='vertical') plt.legend((p1[0], p2[0], p3[0]), ('Rare', 'Average', 'Frequent')) #plt.legend((p1[0], p3[0]), ('Very frequent (>.9)', 'Average')) #plt.savefig("/Users/yuribizzoni/generality_per_frequency_class.png") plt.show() # - [round(x,3) for x in freq] # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Representations of the Wave Equation and its Solutions # # *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the masters course Selected Topics in Audio Signal Processing, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [](mailto:).* # ## Time-Domain # # The inhomogeneous wave equation in the time-domain is given as # # \begin{equation} # \Delta p(\mathbf{x}, t) - \frac{1}{c^2} \frac{\partial^2}{\partial t^2} p(\mathbf{x}, t) = - q(\mathbf{x}, t) . # \end{equation} # # The wave equation is a linear partial differential equation (PDE) with constant coefficients. # ## Monochromatic # # Let's assume a monochromatic real-valued excitation $q(\mathbf{x}, t) = Q(\mathbf{x}, \omega_0) \cos(\omega_0 t)$ with angular frequency $\omega_0 = 2 \pi f_0$. Due to the linearity of the wave equation, the following Ansatz is chosen for the sound pressure $p(\mathbf{x}, t) = P(\mathbf{x}, \omega_0) \cos(\omega_0 t)$. It is common to use complex calculus for PDEs, hence # # \begin{eqnarray} # q(\mathbf{x}, t) &= \Re \{ Q(\mathbf{x}, \omega_0) e^{j \omega_0 t} \} \\ # p(\mathbf{x}, t) &= \Re \{ P(\mathbf{x}, \omega_0) e^{j \omega_0 t} \} . # \end{eqnarray} # # Introducing the complex quantities into the wave equation yields # # \begin{equation} # \Delta P(\mathbf{x}, \omega_0) e^{j \omega_0 t} + \left( \frac{\omega_0}{c} \right)^2 P(\mathbf{x}, \omega_0) e^{j \omega_0 t} = - Q(\mathbf{x}, \omega_0) e^{j \omega_0 t} . # \end{equation} # # The complex exponential function can be canceled resulting in # # \begin{equation} # \Delta P(\mathbf{x}, \omega_0) + \left( \frac{\omega_0}{c} \right)^2 P(\mathbf{x}, \omega_0) = - Q(\mathbf{x}, \omega_0) . # \end{equation} # # For $t=0$ the time-domain solution of the wave equation for $\omega_0$ is given as $p(\mathbf{x}, 0) = \Re \{ P(\mathbf{x}, \omega_0) \}$. It is common to discard the index 0 in the angular frequency. 
The index was however introduced for the sake of comparison with the temporal Fourier transform of the wave equation discussed in the following. # ## Temporal Fourier Domain # # Temporal Fourier transform of the inhomogeneity and the pressure yields the Helmholtz equation # # \begin{equation} # \Delta P(\mathbf{x}, \omega) + \left( \frac{\omega}{c} \right)^2 P(\mathbf{x}, \omega) = - Q(\mathbf{x}, \omega) , # \end{equation} # # where the differentiation theorem of the Fourier transform has been applied. The link to the monochromatic case can be found by considering a monochromatic excitation # # \begin{equation} # Q(\mathbf{x}, \omega_0) = Q(\mathbf{x}, \omega) \delta(\omega - \omega_0) . # \end{equation} # # Due to the linearity of the wave equation the following Ansatz is chosen # # \begin{equation} # P(\mathbf{x}, \omega_0) = P(\mathbf{x}, \omega) \delta(\omega - \omega_0) . # \end{equation} # # Introducing the right-hand-sides of the latter two equations into the Helmholtz equation yields the same result as for the monochromatic case # # \begin{equation} # \Delta P(\mathbf{x}, \omega_0) + \left( \frac{\omega_0}{c} \right)^2 P(\mathbf{x}, \omega_0) = - Q(\mathbf{x}, \omega_0) # \end{equation} # # Hence, the time-domain solution for a monochromatic excitation for $t=0$ may also be derived from the temporal Fourier transform of the problem by evaluation of $p(\mathbf{x}, \omega) = \Re \{ P(\mathbf{x}, \omega) \}$ for one particular frequency $\omega_0$. # ## Temporal Excitation # # Let's consider the solution $P_\delta(\mathbf{x}, \omega)$ of the Helmholtz equation for the excitation with a temporal Dirac impulse $q(\mathbf{x}, t) = q(\mathbf{x}) \delta(t)$ # # \begin{equation} # \Delta P_\delta(\mathbf{x}, \omega) + \left( \frac{\omega}{c} \right)^2 P_\delta(\mathbf{x}, \omega) = - Q(\mathbf{x}) # \end{equation} # # Due to the linearity of the wave equation the pressure field for a generic time-domain excitation $q(\mathbf{x}, t) = q(\mathbf{x}) \hat{q}(t)$ is given as # # \begin{equation} # P(\mathbf{x}, \omega) = P_\delta(\mathbf{x}, \omega) \hat{Q}(\omega) # \end{equation} # # or # # \begin{equation} # p(\mathbf{x},t) = p_\delta(\mathbf{x}, t) * \hat{q}(t) # \end{equation} # **Copyright** # # This notebook is provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text/images/data are licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *, Selected Topics in Audio Signal Processing - Supplementary Material*. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 1042, "output_extras": [{}, {}]} colab_type="code" executionInfo={"elapsed": 23784, "status": "ok", "timestamp": 1520299581088, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="yrFG7EyCYfcq" outputId="ba8bd93b-a9f7-4e27-da24-80942cceca5b" # Remember to turn on GPU!!! 
# installing packages # !pip install keras --upgrade # !pip install tensorflow --upgrade # !pip install kaggle-cli # install cv2 # !apt-get -qq install -y libsm6 libxext6 && pip install -q -U opencv-python ### Download our data straight from Kaggle and unzip # #!kg download -u "xxx" -p "xxx" -c data-science-bowl-2018 # #!unzip -q stage1_train.zip -d stage1_train # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 106, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 34398, "status": "ok", "timestamp": 1520298599375, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="dszAeqvO1Znv" outputId="21207703-bf5a-498b-f889-6c5026c187bd" # https://stackoverflow.com/questions/48875783/how-to-upload-many-files-to-google-colab #### FIRST WE SHALL FUSE THE DRIVE #### CUZ THATS WHAT YOU DO ON # Install a Drive FUSE wrapper. # https://github.com/astrada/google-drive-ocamlfuse # !apt-get install -y -qq software-properties-common python-software-properties module-init-tools # !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null # !apt-get update -qq 2>&1 > /dev/null # !apt-get -y install -qq google-drive-ocamlfuse fuse # Generate auth tokens for Colab from google.colab import auth auth.authenticate_user() # Generate creds for the Drive FUSE library. from oauth2client.client import GoogleCredentials creds = GoogleCredentials.get_application_default() import getpass # !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL vcode = getpass.getpass() # !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} # Create a directory and mount Google Drive using that directory. 
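# (Added note) The FUSE-based mount below is the older Colab workflow; the built-in
# helper achieves the same thing with less setup:
#     from google.colab import drive
#     drive.mount('/content/drive')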
# !mkdir -p Drive # !google-drive-ocamlfuse My Drive # Navigate to our project directory import os #os.chdir('/content/Drive/Data Science Bowl 2018') # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 69, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 708, "status": "ok", "timestamp": 1520299656592, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="CoBIQSJE2-tP" outputId="e2b16592-0236-4691-f2d3-358a51f40138" os.chdir('/content/') # !ls # + [markdown] colab_type="text" id="N9PGWDj9XmC1" # # Libraries and Global Parameters # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 3187, "status": "ok", "timestamp": 1520298656827, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="XJa3wWNjXmC2" outputId="f2fd63ab-a333-44b3-c018-ab2d014bed9f" import os import cv2 import pandas as pd import matplotlib.pyplot as plt import numpy as np from skimage.io import imread, imshow, imread_collection, concatenate_images from skimage.util import img_as_bool, img_as_uint, img_as_ubyte, img_as_int from skimage.transform import resize from skimage.morphology import label import random from random import randint from keras import regularizers from keras.models import Model, load_model from keras.optimizers import Adam, SGD, RMSprop from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Activation, Dense, \ UpSampling2D, BatchNormalization, add, Dropout, Flatten, Conv2DTranspose from keras.layers.core import Lambda from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler from keras import backend as K from keras.losses import binary_crossentropy, sparse_categorical_crossentropy ####### UPDATE THIS ######### ############################# model_num = 4 ############################# ############################# model_checkpoint_file= 'unet_v' + str(model_num) +'.h5' submission_filename = 'unet_v' + str(model_num) +'_pred.csv' # Root folders for test and training data train_root = "./stage1_train" test_root = "./stage1_test" # Size we resize all images to #image_size = (128,128) img_height = 128 img_width = 128 img_channels = 1 # 1 for B&W, 3 for RGB import warnings warnings.filterwarnings('ignore', category=UserWarning, module='skimage') #warnings.resetwarnings() # + [markdown] colab_type="text" id="-KwaQ9yuXmC_" # # Preparing the Data # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 121, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 8683, "status": "ok", "timestamp": 1520299676553, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="RKeTtFh8XmDA" outputId="0a4a6c5b-4aaf-4f3a-9658-7ab281e75d88" # Import images (either test or training) # Decolorize, resize, store in array, and save filenames, etc. 
def import_images(root): dirs = os.listdir(root) filenames=[os.path.join(root,file_id) + "/images/"+file_id+".png" for file_id in dirs] images=[imread(imagefile,as_grey=True) for imagefile in filenames] resized_images = [ resize(image,(img_width,img_height)) for image in images] Array = np.reshape(np.array(resized_images), (len(resized_images),img_height,img_width,img_channels)) #Array = np.reshape(np.array(img_as_ubyte(resized_images),dtype=np.uint8).astype(np.uint8), # (len(resized_images),img_height,img_width,img_channels)) print(Array.mean()) print(Array.std()) # Normalize inputs Array = ((Array - Array.mean())/Array.std()) print(Array.mean()) print(Array.std()) print(images[0].dtype) # print(resized_images[0].dtype) print(Array[0,0,0,0].dtype) return Array, images, filenames, dirs train_X, train_images, train_filenames, train_dirs = import_images(train_root) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="2ZjzF3C9XmDF" ## Import Training Masks # this takes longer than the training images because we have to # combine a lot of mask files # This function creates a single combined mask image # when given a list of masks # Probably a computationally faster way to do this... def collapse_masks(mask_list): for i, mask_file in enumerate(mask_list): if i != 0: # combine mask with previous mask in list mask = np.maximum(mask, imread(os.path.join(train_root,mask_file))) else: # read first mask in mask = imread(os.path.join(train_root,mask_file)) return mask # Import all the masks train_mask_dirs = [ os.path.join(path, 'masks') for path in os.listdir(train_root) ] train_mask_files = [ [os.path.join(dir,file) for file in os.listdir(os.path.join(train_root,dir)) ] for dir in train_mask_dirs] train_masks = [ collapse_masks(mask_files) for mask_files in train_mask_files ] resized_train_masks = [ img_as_bool(resize(image,(img_width,img_height))) for image in train_masks] train_Y = np.reshape(np.array(resized_train_masks),(len(resized_train_masks),img_height,img_width,img_channels)) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 575, "output_extras": [{}, {}]} colab_type="code" executionInfo={"elapsed": 813, "status": "ok", "timestamp": 1520299810382, "user": {"displayName": "", "photoUrl": "//", "userId": "100488807578438696133"}, "user_tz": 300} id="55ciyMCiXmDK" outputId="9854fc3b-1392-4e09-8efe-2d3cd735696c" # Plot images side by side for a list of datasets def plot_side_by_side(ds_list,image_num,size=(15,10)): print('Image #: ' + str(image_num)) fig = plt.figure(figsize=size) for i in range(len(ds_list)): ax1 = fig.add_subplot(1,len(ds_list),i+1) ax1.imshow(ds_list[i][image_num]) plt.show() # Plots random corresponding images and masks def plot_check(ds_list,rand_imgs=None,img_nums=None,size=(15,10)): if rand_imgs != None: for i in range(rand_imgs): plot_side_by_side(ds_list, randint(0,len(ds_list[0])-1),size=size) if img_nums != None: for i in range(len(img_nums)): plot_side_by_side(ds_list,img_nums[i],size=size) plot_check([train_images,train_masks],rand_imgs=1,size=(20,10)) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 121, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 321, "status": "ok", "timestamp": 1520299816699, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} 
id="uYSR9-YmXmDQ" outputId="1d7a72ff-1a57-4d8e-df3d-c771d7c31f72" # Check size of arrays we are inputting to model # This is important! We need the datasets to be as # small as possible to reduce computation time # Check physical size print(train_X.shape) print(train_Y.shape) # Check memory size print(train_X.nbytes) print(train_Y.nbytes) # Check datatypes print(train_X.dtype) print(train_Y.dtype) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 322, "output_extras": [{}, {}]} colab_type="code" executionInfo={"elapsed": 627, "status": "ok", "timestamp": 1520299860851, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="fjdJuPrCXmDY" outputId="97800a0d-322c-4462-f291-db2a364c6333" plot_check([np.squeeze(train_X,axis=3),np.squeeze(train_Y,axis=3)],rand_imgs=1,size=(10,7)) # + [markdown] colab_type="text" id="pnWV3DupXmDe" # # Now Let's Build the Model # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 1511, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 1730, "status": "ok", "timestamp": 1520301440187, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="eLeSGZTrXmDj" outputId="6721840a-be4a-4399-e593-6f7c07040ffc" # Loss and metric functions for the neural net def dice_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred = K.cast(y_pred, 'float32') y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32') intersection = y_true_f * y_pred_f score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f)) return score def dice_loss(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = y_true_f * y_pred_f score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) return 1. 
- score def bce_dice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) ## used for meshnet def create_block(x, filters=21, filter_size=(3, 3), activation='relu',dil_rate=1,dropout_rate=0.25,l2_reg=0): x = Conv2D(filters, filter_size, padding='same', activation=activation, dilation_rate = dil_rate,kernel_regularizer=regularizers.l2(l2_reg)) (x) #x = BatchNormalization() (x) x = Dropout(dropout_rate) (x) return x ## used for dilated unet def encoder(x, filters=44, n_block=3, kernel_size=(3, 3), activation='relu',dropout=0.2): skip = [] for i in range(n_block): x = Conv2D(filters * 2**i, kernel_size, activation=activation, padding='same')(x) x = Dropout(dropout)(x) x = Conv2D(filters * 2**i, kernel_size, activation=activation, padding='same')(x) skip.append(x) x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x) return x, skip def bottleneck(x, filters_bottleneck, mode='cascade', depth=6, dropout=0.2, kernel_size=(3, 3), activation='relu'): dilated_layers = [] if mode == 'cascade': # used in the competition for i in range(depth): x = Conv2D(filters_bottleneck, kernel_size, activation=activation, padding='same')(x) x = Dropout(dropout)(x) dilated_layers.append(x) return add(dilated_layers) elif mode == 'parallel': # Like "Atrous Spatial Pyramid Pooling" for i in range(depth): dilated_layers.append( Conv2D(filters_bottleneck, kernel_size, activation=activation, padding='same')(x) ) return add(dilated_layers) def decoder(x, skip, filters, n_block=3, kernel_size=(3, 3), activation='relu',dropout=0.2): for i in reversed(range(n_block)): x = UpSampling2D(size=(2, 2))(x) x = Conv2D(filters * 2**i, kernel_size, activation=activation, padding='same')(x) x = concatenate([skip[i], x]) x = Conv2D(filters * 2**i, kernel_size, activation=activation, padding='same')(x) x = Dropout(dropout)(x) x = Conv2D(filters * 2**i, kernel_size, activation=activation, padding='same')(x) return x ## 3rd place carvana function def get_dilated_unet( input_shape=(img_width, img_height, img_channels), mode='cascade', filters=44, n_block=3, lr=0.0001, loss=binary_crossentropy, n_class=1 ): inputs = Input(input_shape) enc, skip = encoder(inputs, filters, n_block,dropout=0.25) bottle = bottleneck(enc, filters_bottleneck=filters * 2**n_block, mode=mode,depth=2,dropout=0.25) dec = decoder(bottle, skip, filters, n_block,dropout=0.25) classify = Conv2D(n_class, (1, 1), activation='sigmoid')(dec) model = Model(inputs=inputs, outputs=classify) model.compile(optimizer=Adam(lr), loss=loss, metrics=[dice_coef, bce_dice_loss]) return model ### non-dilated unet def Unet(img_size): inputs = Input((img_size, img_size, img_channels)) #s = Lambda(lambda x: x / 255)(inputs) c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(inputs) c1 = Dropout(0.1)(c1) c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c1) p1 = MaxPooling2D((2, 2))(c1) c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p1) c2 = Dropout(0.1)(c2) c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c2) p2 = MaxPooling2D((2, 2))(c2) c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p2) c3 = Dropout(0.2)(c3) c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c3) p3 = MaxPooling2D((2, 2))(c3) c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p3) c4 = Dropout(0.2)(c4) c4 
= Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c4) p4 = MaxPooling2D(pool_size=(2, 2))(c4) c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p4) c5 = Dropout(0.2)(c5) c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c5) u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5) u6 = concatenate([u6, c4]) c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u6) c6 = Dropout(0.2)(c6) c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c6) u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6) u7 = concatenate([u7, c3]) c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u7) c7 = Dropout(0.2)(c7) c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c7) u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7) u8 = concatenate([u8, c2]) c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u8) c8 = Dropout(0.1)(c8) c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c8) u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8) u9 = concatenate([u9, c1], axis=3) c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u9) c9 = Dropout(0.1)(c9) c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c9) outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9) model = Model(inputs=[inputs], outputs=[outputs]) return model ## master function for creating a net def get_net( input_shape=(img_height, img_width,img_channels), loss=binary_crossentropy, lr=0.001, n_class=1, nb_filters=21, dropout=0.2 ): inputs = Input(input_shape) # Create layers net_body = create_block(inputs,filters=nb_filters,dropout_rate=dropout) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout,dil_rate=2) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout,dil_rate=4) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout,dil_rate=8) net_body = create_block(net_body,filters=nb_filters,dropout_rate=dropout) classify = Conv2D(n_class,(1,1),activation='sigmoid') (net_body) model = Model(inputs=inputs, outputs=classify) model.compile(optimizer=Adam(lr), loss=loss, metrics=[bce_dice_loss, dice_coef]) return model #### CREATE MODEL ########################################################## #my_model = get_net(nb_filters=21,dropout=0.1,loss=binary_crossentropy) #my_model = Unet(img_height) #my_model.compile(optimizer='adam', loss=binary_crossentropy, metrics=[bce_dice_loss, dice_coef]) my_model = get_dilated_unet(filters=16) ############################################################################ print(my_model.summary()) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 228, "output_extras": [{}, {}, {}, {}]} colab_type="code" id="0YKSHHdvXmDt" outputId="922371c5-a9c7-43cb-ecdd-6648b542d9ef" # Fit model earlystopper = EarlyStopping(patience=10, verbose=1) checkpointer = ModelCheckpoint(model_checkpoint_file, verbose=1, save_best_only=True) 
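# Callbacks: EarlyStopping halts training after 10 epochs with no val_loss improvement,
# ModelCheckpoint keeps only the best model (lowest val_loss) on disk, and the
# ReduceLROnPlateau defined next cuts the learning rate to 20% of its current value
# (factor=0.2) once val_loss has stagnated for 4 epochs.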
reduce_plateau = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=4, verbose=1, # min_lr=0.00001, epsilon=0.001, mode='auto') results = my_model.fit(train_X, train_Y, validation_split=0.1, batch_size=1, epochs=100, verbose=1, shuffle=True, callbacks=[ earlystopper, checkpointer, reduce_plateau]) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="_lwSJnsoXmD1" outputId="e7e1286b-1ada-4235-a6b8-4217a2383799" for val_loss in results.history['val_loss']: print(round(val_loss,3)) #print(results.history) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 358, "output_extras": [{}]} colab_type="code" executionInfo={"elapsed": 296, "status": "error", "timestamp": 1520252454689, "user": {"displayName": "", "photoUrl": "//lh6.googleusercontent.com/-Bww_J3dnJNg/AAAAAAAAAAI/AAAAAAAAKZs/M4C7LPPjD_Q/s50-c-k-no/photo.jpg", "userId": "100488807578438696133"}, "user_tz": 300} id="iX-z6WY7XmD7" outputId="df0c4af9-4138-47ad-afd5-5a5d5b849d90" ## Import Test Data and Make Predictions with Model # Import images (either test or training) # Decolorize, resize, store in array, and save filenames, etc. test_X, test_images, test_filenames, test_dirs = import_images(test_root) # Load model and make predictions on test data final_model = load_model(model_checkpoint_file, custom_objects={'dice_coef': dice_coef, 'bce_dice_loss':bce_dice_loss}) preds_test = final_model.predict(test_X, verbose=1) preds_test_t = (preds_test > 0.5) # Create list of upsampled test masks preds_test_upsampled = [] for i in range(len(preds_test)): preds_test_upsampled.append(resize(np.squeeze(preds_test[i]), (test_images[i].shape[0], test_images[i].shape[1]), mode='constant', preserve_range=True)) preds_test_upsampled_bool = [ (mask > 0.5).astype(bool) for mask in preds_test_upsampled ] # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}, {}, {}]} colab_type="code" id="-yMYKFt6XmEA" outputId="0d3f3969-12e5-4b92-c61b-3c8b36fa7b92" plot_check([test_images,preds_test_upsampled,preds_test_upsampled_bool],rand_imgs=2) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="UaUq0uy7XmEE" # Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python def rle_encoding(x): dots = np.where(x.T.flatten() == 1)[0] run_lengths = [] prev = -2 for b in dots: if (b>prev+1): run_lengths.extend((b + 1, 0)) run_lengths[-1] += 1 prev = b return run_lengths def prob_to_rles(x, cutoff=0.5): lab_img = label(x > cutoff) for i in range(1, lab_img.max() + 1): yield rle_encoding(lab_img == i) def generate_prediction_file(image_names,predictions,filename): new_test_ids = [] rles = [] for n, id_ in enumerate(image_names): rle = list(prob_to_rles(predictions[n])) rles.extend(rle) new_test_ids.extend([id_] * len(rle)) sub = pd.DataFrame() sub['ImageId'] = new_test_ids sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x)) sub.to_csv(filename, index=False) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="eNPDX-WrXmEH" generate_prediction_file(test_dirs,preds_test_upsampled_bool,submission_filename) # + [markdown] colab_type="text" id="bAAnlobdXmEO" # Ideas # - Experiment with compression of training data. Am I preserving as much detail # as I can in dtype np.uint8 (values of 0 to 255) ? # - Color vs B&W? 
# - Combine mask and prediction images to show false positives and negatives # - What is the best resizing method? Reflect?? # - Put computer vision / threshold method output as an input to neural net # - Output intermediate layers for inspection # - Crop images to train networks faster for testing ?? # - Take random crops of images to create, and then combine outputs in the end # - Is combining the masks really the best thing to do? Should I be keeping the individual cells separate? # - Pseudo-labelled data # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 14_promoter_promoter # 4/6/2021 # # corresponds to new fig 2F # A. promoter promoter: Basically for a promoter-promoter loop, we want to know if both genes are being expressed in the loop or if one is acting purely as an enhancer and it's own gene is not being expressed # # new fig 2G # correlate # loops with target gene expression # # + import pandas as pd import os, glob import numpy as np import matplotlib.pyplot as plt import seaborn as sns save_dir = '../data/processed/fig2' # + normal_tissues = ['Airway','Astrocytes','Bladder','Colon','Esophageal', 'GDSD0', 'GDSD3', 'GDSD6', 'GM12878', 'HMEC', 'Melanocytes', 'Ovarian', 'Pancreas', 'Prostate', 'Renal', 'Thyroid', 'Uterine'] # - loop_files = glob.glob('../data/interim/merged/loops/*csv') loop_files promoter_anchor_files = glob.glob('../data/interim/annon/promoter_anchors/*bed') promoter_anchor_files # + rna_df = pd.read_csv('../data/interim/rna/tissue_tpm_sym.csv',index_col=0) print(rna_df.columns) rna_df[:5] def get_exp(gene, tissue): try: return rna_df.loc[gene, tissue] except: return 0 # colnames(rna_df) # - file = promoter_anchor_files[0] tissue = os.path.basename(file).split('_')[1] print(tissue) p_anchor_df = pd.read_table(file,header=None) p_anchor_df.columns = ['chr_g','start_g','stop_g', 'gene','chr_a','start_a','stop_a', 'anchor_name','dist'] p_anchor_df['tpm'] = p_anchor_df.gene.apply(lambda g: get_exp(g, tissue)) p_anchor_df = p_anchor_df[['anchor_name','gene','tpm']] loop_file = os.path.join('../data/interim/merged/loops/',tissue+'.loops.csv') loop_df = pd.read_csv(loop_file, index_col=0) # promoter loop = loop with a gene in both anchor sides loop_df = (loop_df .merge(p_anchor_df,how='left', left_on='source',right_on='anchor_name' ) .merge(p_anchor_df, how='left', left_on='target',right_on='anchor_name' ) .dropna() ) # + def get_type(row, thres=1): """ types: 1: both promoters are expression 2: only one promoter is expressed 3: no promoters are expressed """ if row['tpm_x']>thres: if row['tpm_y']>thres: return 1 else: return 2 else: if row['tpm_y']>thres: return 2 else: return 3 loop_df['P_P_type'] = loop_df.apply(get_type,axis=1) # - tissue_P_P_counts = pd.DataFrame(loop_df.P_P_type.value_counts().sort_index()).T tissue_P_P_counts.index = [tissue] tissue_P_P_counts sns.clustermap(loop_df[['gene_x','gene_y','P_P_type']][loop_df.P_P_type==1] .drop_duplicates() .pivot(index='gene_x',columns='gene_y',values='P_P_type') .fillna(0)) # + def get_first_gene(row): if row['gene_x']1) & (loop_bi.loop_count>0)] loop_bi['tpm'] = loop_bi.tpm.clip(0,10) loop_bi['loop_count'] = loop_bi.loop_count.clip(0,100) loop_bi.plot.scatter(x='tpm',y='num_loops') loop_bi.plot.scatter(x='log_tpm',y='num_loops') loop_bi.plot.scatter(x='log_tpm',y='log_loop_count') # %%time data_all = 
pd.read_csv('../data/processed/tissue_crms/all_count_comb_overall.csv') data_all[:5] data_all = data_all.iloc[:,[0,1,2,3,4]] data_all.columns = ['gene','tissue','tpm','num_loop_counts', 'num_loops'] data_all = data_all.fillna(0) data_all.sort_values('gene',inplace=True) # + sns.set_style("whitegrid") def get_scatter_gene(gene, df=data_all, get_text=False): df_sel = df[df.gene==gene].reset_index() ax = sns.scatterplot(data=df_sel, x='tpm',y='num_loops') if get_text: added_x = max(df_sel.tpm)/10 for line in range(0,df_sel.shape[0]): plt.text(df_sel.tpm[line]+added_x, df_sel.num_loops[line], df_sel.tissue[line], horizontalalignment='left', size='medium', color='black', weight='semibold') ax.set_xlabel('TPM') ax.set_ylabel('# loops') ax.set_title('Loop-expression correlation: '+ gene) plt.show() return df_sel def get_per_gene_r(gene, df=data_all): df_sel = df[df.gene==gene].reset_index() r = np.corrcoef(df_sel.tpm, df_sel.num_loops)[0,1] if np.isnan(r): return 0 return r # - data_all[(data_all.tpm>20)&(data_all.tissue=='GDSD6')].gene.unique()#.shape df_sel = get_scatter_gene('ADAMTS18') df_sel = data_all[data_all.gene=='ADAMTS13'].reset_index() display(df_sel) np.corrcoef(df_sel.tpm, df_sel.num_loops) # %%time corr_df = pd.DataFrame() corr_df['gene'] = data_all.gene.unique() corr_df['r'] = corr_df.gene.apply(get_per_gene_r) corr_df.to_csv(os.path.join(save_dir, 'corr_df.csv')) corr_df_filt = corr_df[corr_df.r!=0] corr_df_filt['r2'] = corr_df_filt.r.apply(lambda x: x**2) print(corr_df_filt.r2.describe()) ax = sns.distplot(corr_df_filt.r2) ax.set_title('distribution of r^2 of tpm vs # loops for each gene') plt.savefig(os.path.join(save_dir, 'corr_df_filt_dist.pdf')) # df_sel = get_scatter_gene('NFATC1',get_text=True) # df_sel = get_scatter_gene('FOSB',get_text=True) # df_sel = get_scatter_gene('ADAMTS13',get_text=True) # df_sel = get_scatter_gene('ABCA1',get_text=True) # df_sel = get_scatter_gene('PRMT1',get_text=True) # df_sel = get_scatter_gene('SOX9',get_text=True) # df_sel = get_scatter_gene('YY1',get_text=True) genes = corr_df_filt[(corr_df_filt.r2<.8)&(corr_df_filt.r2>.5)].gene.sort_values().values print(len(genes)) for x in genes: print(x) # df_sel = get_scatter_gene(x,get_text=True) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Qiyr2zLNVVdP" # # Initialize # + id="ymUcQ-nGQfQk" import pandas as pd import pickle import torch from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import numpy as np import torch.nn as nn import torch.nn.functional as F from tqdm import tqdm, tqdm_notebook import torch.optim as optim from torch.utils.data import DataLoader, Dataset import gensim from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix, roc_auc_score from sklearn.preprocessing import LabelEncoder, OneHotEncoder from collections import defaultdict from nltk.data import load from tqdm import tqdm import time from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings("ignore") import os import nltk import spacy import networkx as nx nlp = spacy.load("en_core_web_sm") data_path = 'data' train_df = pd.read_csv(os.path.join(data_path, 'train.csv')) validation_df = pd.read_csv(os.path.join(data_path, 'validation.csv')) test_df = pd.read_csv(os.path.join(data_path, 'test.csv')) pretrained_w2v = 
gensim.models.Word2Vec.load(os.path.join(data_path, 'word2vec_100_10_5.model')) MAX_LEN = 80 SHORT_MAX_LEN = 25 MAX_WORDS = 30000 OOV_TOKEN = 'OOV' TRUNCATE_MODE = 'post' PADDING_MODE = 'post' EMBEDDING_SIZE = 100 class DynamicDataset(Dataset): def __init__(self, sequences, features, short_sequences, labels): self.sequences = sequences self.features = features self.short_sequences = short_sequences self.labels = labels def __getitem__(self, i): return (self.sequences[i], self.features[i], self.short_sequences[i], self.labels[i]) def __len__(self): return len(self.sequences) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") seed = 0 torch.manual_seed(0) np.random.seed(1) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False with open(os.path.join(data_path, 'sequences_labels.pkl'), 'rb') as handle: [train_sequences, train_features, train_sp_sequences, train_labels, val_sequences, val_features, val_sp_sequences, val_labels, test_sequences, test_features, test_sp_sequences, test_labels, propheno_sequences, propheno_features, propheno_labels] = pickle.load(handle) with open(os.path.join(data_path, 'tokenizer.pkl'), 'rb') as handle: tokenizer = pickle.load(handle) vocab_size = len(tokenizer.word_index) weights_matrix = np.zeros((vocab_size+1, EMBEDDING_SIZE)) for i, word in enumerate(tokenizer.word_index, start=1): try: weights_matrix[i] = pretrained_w2v.wv[word] except KeyError: weights_matrix[i] = np.random.normal(scale=0.6, size=(EMBEDDING_SIZE, )) train = DynamicDataset(train_sequences, train_features, train_sp_sequences, train_labels) validation = DynamicDataset(val_sequences, val_features, val_sp_sequences, val_labels) test = DynamicDataset(test_sequences, test_features, test_sp_sequences, test_labels) seed = 0 class MultiCnn(nn.Module): def __init__(self, vocab_size, embedding_size): torch.manual_seed(seed) super(MultiCnn, self).__init__() ### Original Sentence self.word_embeddings = nn.Embedding(vocab_size, embedding_size) self.word_embeddings.weight.data.copy_(torch.from_numpy(weights_matrix)) self.conv1 = nn.Conv1d(embedding_size, 64, 3) self.drop1 = nn.Dropout(0.5) self.max_pool1 = nn.MaxPool1d(2) self.flat1 = nn.Flatten() self.conv2 = nn.Conv1d(embedding_size, 64, 5) self.drop2 = nn.Dropout(0.5) self.max_pool2 = nn.MaxPool1d(2) self.flat2 = nn.Flatten() ### Shortest Path self.s_word_embeddings = nn.Embedding(vocab_size, embedding_size) self.s_word_embeddings.weight.data.copy_(torch.from_numpy(weights_matrix)) self.s_conv1 = nn.Conv1d(embedding_size, 64, 3) self.s_drop1 = nn.Dropout(0.3) self.s_max_pool1 = nn.MaxPool1d(2) self.s_flat1 = nn.Flatten() self.s_conv2 = nn.Conv1d(embedding_size, 64, 5) self.s_drop2 = nn.Dropout(0.3) self.s_max_pool2 = nn.MaxPool1d(2) self.s_flat2 = nn.Flatten() ### Concatenate self.fc1 = nn.Linear(64*98, 100) self.drop4 = nn.Dropout(0.2) self.fc2 = nn.Linear(100, 64) self.drop5 = nn.Dropout(0.2) self.fc3 = nn.Linear(64, 1) def forward(self, sentence, features, shortest): embedding = self.word_embeddings(sentence).permute(0, 2, 1) short_embedding = self.s_word_embeddings(shortest).permute(0, 2, 1) conv1 = F.relu(self.conv1(embedding)) drop1 = self.drop1(conv1) max_pool1 = self.max_pool1(drop1) flat1 = self.flat1(max_pool1) conv2 = F.relu(self.conv2(embedding)) drop2 = self.drop2(conv2) max_pool2 = self.max_pool2(drop2) flat2 = self.flat2(max_pool2) short_conv1 = F.relu(self.s_conv1(short_embedding)) short_drop1 = self.s_drop1(short_conv1) short_max_pool1 = self.s_max_pool1(short_drop1) short_flat1 = 
self.s_flat1(short_max_pool1) short_conv2 = F.relu(self.s_conv2(short_embedding)) short_drop2 = self.s_drop2(short_conv2) short_max_pool2 = self.s_max_pool2(short_drop2) short_flat2 = self.s_flat2(short_max_pool2) cat = torch.cat((flat1, flat2, short_flat1, short_flat2), dim=1) fc1 = F.relu(self.fc1(cat.view(len(sentence), -1))) drop4 = self.drop4(fc1) fc2 = F.relu(self.fc2(drop4)) drop5 = self.drop5(fc2) fc3 = torch.sigmoid(self.fc3(drop5)) return fc3 class BiLSTMShort(nn.Module): def __init__(self, vocab_size, embedding_size): torch.manual_seed(seed) super(BiLSTMShort, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, embedding_size) self.word_embeddings.weight.data.copy_(torch.from_numpy(weights_matrix)) self.bi_lstm1 = nn.LSTM(embedding_size, 32, bidirectional=True) self.bi_lstm2 = nn.LSTM(embedding_size, 32, bidirectional=True) self.fc1 = nn.Linear(64*105, 100) self.drop1 = nn.Dropout(0.2) self.fc2 = nn.Linear(100, 64) self.drop2 = nn.Dropout(0.2) self.fc3 = nn.Linear(64, 1) def forward(self, sentence, features, shortest): embedding = self.word_embeddings(sentence) short_embedding = self.word_embeddings(shortest) lstm_out1, hidden1 = self.bi_lstm1(embedding) short_lstm_out1, short_hidden1 = self.bi_lstm2(short_embedding) cat = torch.cat((lstm_out1.permute(0, 2, 1), short_lstm_out1.permute(0, 2, 1)), dim=2) fc1 = F.relu(self.fc1(cat.view(len(sentence), -1))) drop1 = self.drop1(fc1) fc2 = F.relu(self.fc2(drop1)) drop2 = self.drop2(fc2) fc3 = torch.sigmoid(self.fc3(drop2)) return fc3 def print_performance(preds, true_labels): print('Precision: {0:4.3f}, Recall: {1:4.3f}, F1: {2:4.3f}, AUROC: {3:4.3f}'.format(precision_score(true_labels, preds), recall_score(true_labels, preds), f1_score(true_labels, preds), roc_auc_score(true_labels, preds))) print('tn={0:d}, fp={1:d}, fn={2:d}, tp={3:d}'.format(*confusion_matrix(true_labels, preds).ravel())) print('{0:4.3f} {1:4.3f} {2:4.3f} {3:4.3f}'.format(precision_score(true_labels, preds), recall_score(true_labels, preds), f1_score(true_labels, preds), roc_auc_score(true_labels, preds))) def train_model(model, dataset, epochs=20, echo=False): criterion = nn.BCELoss() optimizer = optim.Adam(model.parameters(), lr=0.001) loader = DataLoader(dataset, batch_size=32) # model.train() for epoch in range(epochs): model.train() progress = tqdm_notebook(loader, leave=False) # tqdm_notebook for inputs, features, short, target in progress: model.zero_grad() output = model(inputs.to(device), features.to(device), short.to(device)) loss = criterion(output, target.to(device)) loss.backward() optimizer.step() if echo: print(epoch, loss) return model def concatenate_sequences(sequences, features, shorts, labels, added_sequences, added_features, added_shorts, added_labels): sequences = torch.cat((sequences, added_sequences)) features = np.concatenate((features, added_features)) shorts = np.concatenate((shorts, added_shorts)) labels = torch.cat((labels, added_labels)) return sequences, features, shorts, labels def eval_model(model, dataset, indices=None, return_binary=False, threshold=None): if indices is not None: dataset = DynamicDataset(dataset[indices][0], dataset[indices][1], dataset[indices][2], dataset[indices][3]) loader = DataLoader(dataset, batch_size=32) predictions , true_labels = [], [] model.eval() cnt = 0 for batch in loader: batch = tuple(t.to(device) for t in batch) inputs, features, shorts, labels = batch with torch.no_grad(): logits = model(inputs.to(device), features.to(device), shorts.to(device)) logits = 
logits.detach().cpu().numpy() label_ids = labels.to('cpu').numpy() predictions.append(logits) true_labels.append(label_ids) cnt += 1 if threshold and cnt == threshold: break predictions = [item for sublist in predictions for item in sublist] if return_binary: predictions = np.array([1 if pred[0] > 0.5 else 0 for pred in predictions]) labels = [item[0] for sublist in true_labels for item in sublist] return predictions, labels def print_stats(dataset): print('Length of input dataset: {0:d}'.format(len(dataset))) print('Positive instances: {0:d} ({1:4.2f}), Negative instances: {2:d} ({3:4.2f})'.format(sum(dataset.labels == 1)[0], int(sum(dataset.labels == 1)[0]) / len(dataset), sum(dataset.labels == 0)[0], int(sum(dataset.labels == 0)[0]) / len(dataset))) # + [markdown] id="C_ml0_pPVe6L" # # Experiments # + id="QdzeSskZRA6B" seed = 0 torch.manual_seed(seed) np.random.seed(1) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False train = DynamicDataset(train_sequences, train_features, train_sp_sequences, train_labels) def run_model(network): model = network(vocab_size+1, EMBEDDING_SIZE) model.cuda() EPOCHS = 20 train_model(model, train, epochs=EPOCHS, echo=False) return model time1 = time.time() rnn_model = run_model(BiLSTMShort) time2 = time.time() print(time2 - time1) time1 = time.time() cnn_model = run_model(MultiCnn) time2 = time.time() print(time2 - time1) rnn_predictions, true_labels = eval_model(rnn_model, test, return_binary=True) print_performance(rnn_predictions, true_labels) cnn_predictions, true_labels = eval_model(cnn_model, test, return_binary=True) print_performance(cnn_predictions, true_labels) # + id="Oo2uEsFX2EfL" with open(os.path.join(data_path, 'pppred_bert_probabilities_validation_test.pkl'), 'rb') as handle: [flat_predictions_val, flat_predictions_test] = pickle.load(handle) # + id="p8w92SjXqPrL" from sklearn.linear_model import LogisticRegression rnn_val_predictions, true_labels = eval_model(rnn_model, validation, return_binary=False) rnn_val_predictions = np.array(rnn_val_predictions) cnn_val_predictions, true_labels = eval_model(cnn_model, validation, return_binary=False) cnn_val_predictions = np.array(cnn_val_predictions) probabilities = clf.predict_proba(vec.transform(validation_df['Sentence'])) lr = LogisticRegression() lr.fit(np.concatenate((rnn_val_predictions, cnn_val_predictions, probabilities[:,1].reshape(-1,1), flat_predictions_val.reshape(-1,1)), axis=1), val_labels) rnn_test_predictions, true_labels = eval_model(rnn_model, test, return_binary=False) rnn_test_predictions = np.array(rnn_test_predictions) cnn_test_predictions, true_labels = eval_model(cnn_model, test, return_binary=False) cnn_test_predictions = np.array(cnn_test_predictions) probabilities_test = clf.predict_proba(vec.transform(test_df['Sentence'])) lr_preds = lr.predict(np.concatenate((rnn_test_predictions, cnn_test_predictions, probabilities_test[:,1].reshape(-1,1), flat_predictions_test.reshape(-1,1)), axis=1)) lr_preds = np.array(lr_preds) true_labels = np.array(true_labels) print_performance(lr_preds, true_labels) # + id="IqJkmY2AKjV4" abs_idx = np.where(test_df['type'] == 'abs') ft_idx = np.where(test_df['type'] == 'ft') print_performance(preds, true_labels) print_performance(preds[abs_idx], true_labels[abs_idx]) print_performance(preds[ft_idx], true_labels[ft_idx]) print('*' * 50) print_performance((flat_predictions_test > 0), test_labels) print_performance((flat_predictions_test > 0)[abs_idx], test_labels[abs_idx]) print_performance((flat_predictions_test > 
0)[ft_idx], test_labels[ft_idx]) print('*' * 50) print_performance(lr_preds, true_labels) print_performance(lr_preds[abs_idx], true_labels[abs_idx]) print_performance(lr_preds[ft_idx], true_labels[ft_idx]) print('*' * 50) print_performance(rnn_predictions, true_labels) print_performance(rnn_predictions[abs_idx], true_labels[abs_idx]) print_performance(rnn_predictions[ft_idx], true_labels[ft_idx]) print('*' * 50) print_performance(cnn_predictions, true_labels) print_performance(cnn_predictions[abs_idx], true_labels[abs_idx]) print_performance(cnn_predictions[ft_idx], true_labels[ft_idx]) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.ticker import MaxNLocator plt.rcParams['mathtext.fontset'] = 'stix' from sgoopT import stationary, mu_factor, eigenval, sg_transmat, sgap from cmap1dT import SgoopDistance import scipy.optimize as opt # - # # Variables # + # data: filename={} s={} weights={} mids={} prob={} freeE={} # MD parameters: beta=2.5 Rout=100 # (unit: steps/lines) # metaD parameters: stride={} wbias={} delta_beta={} # SGOOP parameters: rc_bin={} wells={} th={} coeff={} rc={} pi={} MU={} S={} E={} binedges={} # - # # First SGOOP # ### Read trajectory files # + # Read unbiased data: filename['unbiased']='./DATA_aladip/unbiased_T300/COLVAR' s['unbiased'] = np.loadtxt(filename['unbiased'], unpack=True, usecols=(4,5,6,7,8,9), skiprows=7, max_rows=50000) # Read biased data: filename['biased_1']='./DATA_aladip/biased1_T300/COLVAR' s['1']=np.loadtxt(filename['biased_1'], unpack=True, usecols=(4,5,6,7,8,9), skiprows=7) # - # ### Read reweighting factors filename['weights']='./DATA_aladip/biased1_T300/weights_aladip_biased1.txt' weights['1']=np.loadtxt(filename['weights']) # ### Spectral gap optimization: Find optimal RC $\chi^{(1)}$ # + def compute_sgap(coeff_i, rc_bin_i=20, wells_i=2, weights_i=None): # function to be used in basinhopping. 
records function evaluations and returns -spectral gap global nfev nfev +=1 rc['unbiased']=np.dot(coeff_i, s['unbiased'][:,:50000]) rc['i'] = np.dot(coeff_i, s['1']) pi['i'], _ = stationary(rc['i'], rc_bin=rc_bin_i, weights=weights['1']) MU['i'] = mu_factor(rc['unbiased'], pi['i'], rc_bin=rc_bin_i, max_d=1) S['i'] = sg_transmat(rc_bin_i, pi['i'], MU['i'], max_d=1) return sgap(S['i'], wells_i) def print_fun(x, f, accepted): global now,last,nfev,lastf now=time.time() print(x,end=' ') if accepted == 1: print("with spectral gap %.4f accepted after %3i runs (%.3f)" % (-f, nfev-lastf, now-last)) else: print("with spectral gap %.4f declined after %3i runs (%.3f)" % (-f, nfev-lastf, now-last)) last=now lastf=nfev start = time.time() coeff['1']=np.array([0.02, 0.97, -0.25, -0.02, 0.5, 0.5]) rc_bin['1']=50 wells['1']=3 def opti_func(coeff_i): return -compute_sgap(coeff_i, rc_bin_i=rc_bin['1'], wells_i=wells['1']) last = start lastf = nfev = 0 minimizer_kwargs = {"options": {"maxiter":10}} ret = opt.basinhopping(opti_func, coeff['1'], niter=50, T=.01, stepsize=.1, minimizer_kwargs=minimizer_kwargs, callback=print_fun) end = time.time() print(end-start) ret # - # ### Exp(eigenvalues) and 0th eigenvectors of MaxCal-based rate matrix for optimal RC $\chi^{(1)}$ # + # parameters: rc_bin['1']=50 coeff['1']=np.array([0.64325837, 0.778393 , -0.13339705, -0.08794336, -0.22147228, -0.16453171]) # data: rc['unbiased'] = np.dot(coeff['1'], s['unbiased'][:,:50000]) rc['1'] = np.dot(coeff['1'], s['1']) pi['1'], binedges['1'] = stationary(rc['1'], rc_bin=rc_bin['1'], weights=weights['1']) MU['1'] = mu_factor(rc['unbiased'], pi['1'], rc_bin=rc_bin['1'], max_d=1) S['1'] = sg_transmat(rc_bin['1'], pi['1'], MU['1'], max_d=1) E['1'] = eigenval(S['1']) eval1, exp_eval1, evec1 = E['1'] rate1 = -eval1 # Plottings: fig, ax=plt.subplots(figsize=(8,4), nrows=1, ncols=2) ax[0].scatter(list(range(len(E['1'][1]))), E['1'][1]) ax[1].plot(E['1'][2][:,0]-np.min(E['1'][2][:,0])) ax[0].tick_params(which='major', axis='both', direction='in', labelsize=16, pad=1) ax[0].set_xlabel('Index $i$', size=16) ax[0].set_ylabel('$\lambda^{(1)}$', size=20).set_rotation(0) ax[0].yaxis.set_label_coords(-0.17, 0.5) ax[0].set_xlim(-0.5, rc_bin['1']+0.5) ax[1].tick_params(which='major', axis='both', direction='in', labelsize=16, pad=1) ax[1].set_xlabel('Index $i$', size=16) ax[1].set_ylabel('$\psi^{(1)}_0$', size=20).set_rotation(0) ax[1].yaxis.set_label_coords(-0.17, 0.5) ax[1].set_xlim(-0.5, rc_bin['1']+0.5) fig.tight_layout() plt.show() # - # # Second SGOOP # Read biased data: filename['biased_2']='./DATA_aladip/biased2_T300/COLVAR' s['2']=np.loadtxt(filename['biased_2'], unpack=True, usecols=(4,5,6,7,8,9), skiprows=7) # ### Spectral gap optimization: Find optimal RC $\chi^{(2)}$ # + def compute_sgap(coeff_i, rc_bin_i=20, wells_i=2, weights_i=None): # function to be used in basinhopping. 
records function evaluations and returns -spectral gap global nfev nfev +=1 rc['unbiased']=np.dot(coeff_i, s['2'][:,50000:200000]) rc['i'] = np.dot(coeff_i, s['2']) pi['i'], _ = stationary(rc['i'], rc_bin=rc_bin_i) MU['i'] = mu_factor(rc['unbiased'], pi['i'], rc_bin=rc_bin_i, max_d=1) S['i'] = sg_transmat(rc_bin_i, pi['i'], MU['i'], max_d=1) return sgap(S['i'], wells_i) def print_fun(x, f, accepted): global now,last,nfev,lastf now=time.time() print(x,end=' ') if accepted == 1: print("with spectral gap %.4f accepted after %3i runs (%.3f)" % (-f, nfev-lastf, now-last)) else: print("with spectral gap %.4f declined after %3i runs (%.3f)" % (-f, nfev-lastf, now-last)) last=now lastf=nfev start = time.time() coeff['2']=np.array([0.2, 0.5, -0.25, -0.2, -0.5, 0.5]) rc_bin['2']=50 wells['2']=3 def opti_func(coeff_i): return -compute_sgap(coeff_i, rc_bin_i=rc_bin['2'], wells_i=wells['2']) last = start lastf = nfev = 0 minimizer_kwargs = {"options": {"maxiter":10}} ret = opt.basinhopping(opti_func, coeff['1'], niter=100, T=.01, stepsize=.1, minimizer_kwargs=minimizer_kwargs, callback=print_fun) end = time.time() print(end-start) ret # - # ### Exp(eigenvalues) and 0th eigenvectors of MaxCal-based rate matrix for optimal RC $\chi^{(2)}$ # + # parameters: rc_bin['2']=50 coeff['2']=np.array([0.82665355, 1.16592255, -0.11967506, 0.57802668, 0.01336444, 0.24045454]) # data: rc['unbiased'] = np.dot(coeff['2'], s['2'][:,50000:200000]) rc['2'] = np.dot(coeff['2'], s['2']) pi['2'], binedges['2'] = stationary(rc['2'], rc_bin=rc_bin['2']) MU['2'] = mu_factor(rc['unbiased'], pi['2'], rc_bin=rc_bin['2'], max_d=1) S['2'] = sg_transmat(rc_bin['2'], pi['2'], MU['2'], max_d=1) E['2'] = eigenval(S['2']) eval2, exp_eval2, evec2 = E['2'] rate2 = -eval2 # Plottings: fig, ax=plt.subplots(figsize=(8,4), nrows=1, ncols=2) ax[0].scatter(list(range(len(E['2'][1]))), E['2'][1]) ax[1].plot(E['2'][2][:,0]-np.min(E['2'][2][:,0])) ax[0].tick_params(which='major', axis='both', direction='in', labelsize=16, pad=1) ax[0].set_xlabel('Index $i$', size=16) ax[0].set_ylabel('$\lambda^{(2)}$', size=20).set_rotation(0) ax[0].yaxis.set_label_coords(-0.17, 0.5) ax[0].set_xlim(-0.5, rc_bin['2']+0.5) ax[1].tick_params(which='major', axis='both', direction='in', labelsize=16, pad=1) ax[1].set_xlabel('Index $i$', size=16) ax[1].set_ylabel('$\psi^{(2)}_0$', size=20).set_rotation(0) ax[1].yaxis.set_label_coords(-0.17, 0.5) ax[1].set_xlim(-0.5, rc_bin['2']+0.5) fig.tight_layout() plt.show() # - # # SGOOP-d # + sgd1 = SgoopDistance(coeff['1'], eval1, evec1, binedges['1']) sgd2 = SgoopDistance(coeff['2'], eval2, evec2, binedges['2']) def rate1xd(pos1, pos2, K=1, num_eig=rc_bin['1']): """ Calculate rate1 * d_comm. 
""" d_comm1 = sgd1.pairwise_d(pos1, pos2, num_eig) d_comm2 = sgd2.pairwise_d(pos1, pos2, num_eig) d_hat1 = rate1[1]*d_comm1 d_hat2 = rate2[1]*d_comm2 return d_hat1 + K*d_hat2 # + C7eq1=(-2.8, 2.9, 0.3) C7eq2=(-1.25, 1.0, 0.3) C7ax=(1.2, -0.8, 0.3) A=( np.cos(C7eq1[0]), np.sin(C7eq1[0]), np.cos(C7eq1[1]), np.sin(C7eq1[1]), np.cos(C7eq1[2]), np.sin(C7eq1[2]) ) B=( np.cos(C7eq2[0]), np.sin(C7eq2[0]), np.cos(C7eq2[1]), np.sin(C7eq2[1]), np.cos(C7eq2[2]), np.sin(C7eq2[2]) ) C=( np.cos(C7ax[0]), np.sin(C7ax[0]), np.cos(C7ax[1]), np.sin(C7ax[1]), np.cos(C7ax[2]), np.sin(C7ax[2]) ) K_ast=rate1[1]/rate2[1] kd=np.array([rate1xd(A,B, K_ast), rate1xd(A,C, K_ast), rate1xd(B,C, K_ast)]) # Plottings: K_arr=np.arange(0,200,1) fig, ax=plt.subplots(figsize=(8,4), nrows=1, ncols=2) ax[0].plot([K_ast]*3, np.linspace(-10,800,3), 'k--') ax[0].plot(K_arr, rate1xd(A,B,K_arr), 'b-', label='AB') ax[0].plot(K_arr, rate1xd(A,C,K_arr), 'r-', label='AC') ax[0].plot(K_arr, rate1xd(B,C,K_arr), 'g--', label='BC') ax[1].scatter(np.arange(kd.shape[0]), kd, c=['b', 'r', 'g']) ax[0].tick_params(axis='both', which='major', direction='in', labelsize=16) ax[0].set_xlabel('$K$', size=20) ax[0].set_ylabel('$k^{(1)}_1 d_K$', size=20) ax[0].set_xlim(0,2.5) ax[0].set_ylim(-10,160) ax[0].legend(loc='upper right', fontsize=16) ax[1].tick_params(which='major', axis='both', direction='in', labelsize=16) ax[1].set_xticks([0, 1, 2]) labels = [item.get_text() for item in ax[1].get_xticklabels()] ax[1].set_xticklabels(['AB', 'AC', 'BC']) ax[1].set_ylim(-10,160) fig.tight_layout() plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.11 ('base') # language: python # name: python3 # --- # # all data models mypath = "./Data/DataModel/" from os import listdir from os.path import isfile, join, basename onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] print(onlyfiles) import data_model as dm print(basename(mypath + onlyfiles[0])) #dm = dm.DataModel(dm.json_file_to_data_model(mypath + onlyfiles[0])) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (Cell Painting) # language: python # name: cell-painting # --- # + import os import sys proj_root = os.getcwd() + "/.." 
os.chdir(proj_root) sys.path.append(proj_root) # + import numpy as np import pandas as pd import seaborn as sns from tifffile import imread import matplotlib.pyplot as plt from src.models.config import Config plt.rcParams["figure.figsize"] = (25,5) sns.set_theme(style="whitegrid") config = Config() metadata = pd.read_csv(config.dataset_metadata) compounds = metadata['compound_name'].unique() for compound in compounds: rows = metadata.loc[metadata['compound_name'] == compound] fig, axes = plt.subplots(1, 5) fig.suptitle(f"{compound} - Distribution of the n-th percentile in individual images, per channel", fontsize=20) for ax_index, percentile in enumerate([99, 98, 97, 95, 93]): channel_values_above_percentile = [[], [], [], []] for index, row in rows.iterrows(): image = imread(config.data_root_dir / row['filename']) for channel in range(4): channel_data = image[:,:,channel].flatten() channel_values_above_percentile[channel].append(np.percentile(channel_data, percentile)) for channel in range(4): channel_values_above_percentile[channel] = np.log10(np.array(channel_values_above_percentile[channel])) plot_data = pd.DataFrame(zip(*channel_values_above_percentile), columns=["Channel 1", "Channel 2", "Channel 3", "Channel 4"]) plot = sns.violinplot(data=plot_data, palette="coolwarm", ax=axes[ax_index]) plot.set_title(f"{percentile}th percentile") plot.set_yticks(np.arange(2, 5.5, 0.5)) plot.set_ylim([2, 5]) if ax_index == 0: plot.set_ylabel("log10 of percentiles", fontsize=15) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="t16VscFQJWTM" colab_type="text" # ## Reading File # + id="qFFRlxtmssr8" colab_type="code" outputId="693875c2-848b-422f-f067-2b31de8e410f" executionInfo={"status": "ok", "timestamp": 1561804249607, "user_tz": -360, "elapsed": 39904, "user": {"displayName": "", "photoUrl": "", "userId": "10838423032874605851"}} colab={"base_uri": "https://localhost:8080/", "height": 122} from google.colab import drive import numpy as np import pandas as pd drive.mount('/content/gdrive') bengali_news_after_preprocessing = pd.read_pickle('/content/gdrive/My Drive/Projects/Bengali Text Classification/Bengali_Text_after_preprocessing.pkl') # + [markdown] id="qjNZIT_pJbmb" colab_type="text" # ## X contains the numpy array format of text # + id="eb9BPsWD3nNU" colab_type="code" colab={} X = bengali_news_after_preprocessing.iloc[:,:].values # + [markdown] id="KOs4hECjJhXz" colab_type="text" # ## Following line removes nonetype values from numpy array # + id="jhq5JkkF8MP0" colab_type="code" colab={} X = X[X != np.array(None)] # + [markdown] id="i4HrtllZJr48" colab_type="text" # ## Following lines counts the tokens in data and sort them # + id="NlCYgPQI98QE" colab_type="code" colab={} unique, idx, cnt = np.unique(X, return_index = True, return_counts=True) # + id="5iwbhSCc_geR" colab_type="code" colab={} from operator import itemgetter cnt = 1 / cnt sorted_vocab = sorted(zip(unique, cnt, idx), key=itemgetter(1, 2)) # + id="ZmvEk0Fp_hnh" colab_type="code" colab={} unique, count, index = zip(*sorted_vocab) # + id="3B9MOKP-ADYf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c16a5ae4-0dba-4ad0-ebbc-e07e41eafa85" executionInfo={"status": "ok", "timestamp": 1561810490167, "user_tz": -360, "elapsed": 1201, "user": {"displayName": "", "photoUrl": "", "userId": "10838423032874605851"}} 
len(unique) # + [markdown] id="GDfNitGXkqjA" colab_type="text" # ## Finally vocabularies are saved in vocab.txt file # + id="6FuCZNM9H2hx" colab_type="code" colab={} with open('/content/gdrive/My Drive/Projects/Bengali Text Classification/vocab.txt', 'w') as vocab: for i in unique: vocab.write(i+'\n') / -*- coding: utf-8 -*- / --- / jupyter: / jupytext: / text_representation: / extension: .q / format_name: light / format_version: '1.5' / jupytext_version: 1.14.4 / kernelspec: / display_name: SQL / language: sql / name: SQL / --- / + [markdown] azdata_cell_guid="e01663cc-427c-457f-84db-b16d0fca3a90" / # Analyze NY taxi data / In this tutorial, we will perform exploratory data analysis by combining different Azure Open Datasets using serverless SQL and then visualizing the results in Azure Data Studio. In particular, you analyze the [New York City (NYC) Taxi dataset](https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/ "https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/"). / You can learn more about the meaning of the individual columns in the descriptions of the [NYC Taxi](https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/ "https://azure.microsoft.com/services/open-datasets/catalog/nyc-taxi-limousine-commission-yellow-taxi-trip-records/"), [Public Holidays](https://azure.microsoft.com/services/open-datasets/catalog/public-holidays/ "https://azure.microsoft.com/services/open-datasets/catalog/public-holidays/"), and [Weather Data](https://azure.microsoft.com/services/open-datasets/catalog/noaa-integrated-surface-data/ "https://azure.microsoft.com/services/open-datasets/catalog/noaa-integrated-surface-data/") Azure open datasets. 
/ Let's first get familiar with the NYC Taxi data by running the following query: / + azdata_cell_guid="dbc4f12e-388c-49fa-9d85-0fbea3b19d1b" SELECT TOP 10 * FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/nyctlc/yellow/puYear=*/puMonth=*/*.parquet', FORMAT='PARQUET' ) AS [nyc] / + [markdown] azdata_cell_guid="a373fa76-bfdf-4bb6-8098-73c9ef436eb8" / Similarly, we can explore the Public Holidays dataset: / + azdata_cell_guid="48b6ee55-09ec-47df-bea5-707dc2f42aa8" SELECT TOP 10 * FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/holidaydatacontainer/Processed/*.parquet', FORMAT='PARQUET' ) AS [holidays] / + [markdown] azdata_cell_guid="c4145b77-8663-4e59-914b-721955a02635" / Lastly, we can also explore the Weather Data dataset by using the following query: / + azdata_cell_guid="f3da158c-c168-45b0-8e38-7ee2d430420f" SELECT TOP 10 * FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/isdweatherdatacontainer/ISDWeather/year=*/month=*/*.parquet', FORMAT='PARQUET' ) AS [weather] / + [markdown] azdata_cell_guid="745b2c81-01eb-4bf5-9cad-47a03dcff194" / ## Time series, seasonality, and outlier analysis / / We can easily summarize the yearly number of taxi rides by using the following query: / + azdata_cell_guid="e7bacd03-45d4-4b0b-b1d0-9522e1a54436" SELECT YEAR(tpepPickupDateTime) AS current_year, COUNT(*) AS rides_per_year FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/nyctlc/yellow/puYear=*/puMonth=*/*.parquet', FORMAT='PARQUET' ) AS [nyc] WHERE nyc.filepath(1) >= '2009' AND nyc.filepath(1) <= '2019' GROUP BY YEAR(tpepPickupDateTime) ORDER BY 1 ASC / + [markdown] azdata_cell_guid="4397f453-4b20-4083-ae0e-4966d789993f" / The data can be visualized in Azure Data Studio by switching from the Table to the Chart view. You can choose among different chart types, such as Area, Bar, Column, Line, Pie, and Scatter.  / / From this visualization, a trend of a decreasing number of rides over years can be clearly seen. Presumably, this decrease is due to the recent increased popularity of ride-sharing companies. / / Next, let's focus the analysis on a single year, for example, 2016. The following query returns the daily number of rides during that year: / + azdata_cell_guid="e01007a4-adda-460a-83e0-e45a789c80cb" SELECT CAST([tpepPickupDateTime] AS DATE) AS [current_day], COUNT(*) as rides_per_day FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/nyctlc/yellow/puYear=*/puMonth=*/*.parquet', FORMAT='PARQUET' ) AS [nyc] WHERE nyc.filepath(1) = '2016' GROUP BY CAST([tpepPickupDateTime] AS DATE) ORDER BY 1 ASC / + [markdown] azdata_cell_guid="455f1a05-87bc-4a83-8856-7ac9adc76af0" / Again, we can easily visualize data by plotting the Column chart with the Category column set to `current_day` and the Legend (series) column set to `rides_per_day`. / / From the plot chart, you can see there's a weekly pattern, with Saturdays as the peak day. During summer months, there are fewer taxi rides because of vacations. Also, notice some significant drops in the number of taxi rides without a clear pattern of when and why they occur. 
/ / Next, let's see if the drops correlate with public holidays by joining the NYC Taxi rides dataset with the Public Holidays dataset: / + azdata_cell_guid="710cb813-a14d-4daa-8436-7a0086e4381f" WITH taxi_rides AS ( SELECT CAST([tpepPickupDateTime] AS DATE) AS [current_day], COUNT(*) as rides_per_day FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/nyctlc/yellow/puYear=*/puMonth=*/*.parquet', FORMAT='PARQUET' ) AS [nyc] WHERE nyc.filepath(1) = '2016' GROUP BY CAST([tpepPickupDateTime] AS DATE) ), public_holidays AS ( SELECT 500000 as holiday, date FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/holidaydatacontainer/Processed/*.parquet', FORMAT='PARQUET' ) AS [holidays] WHERE countryorregion = 'United States' AND YEAR(date) = 2016 ) SELECT * FROM taxi_rides t LEFT OUTER JOIN public_holidays p on t.current_day = p.date ORDER BY current_day ASC / + [markdown] azdata_cell_guid="a2160c53-23d2-4243-94ad-7c3408c49080" / From the plot chart, you can see that during public holidays the number of taxi rides is lower. There's still one unexplained large drop on January 23. Let's check the weather in NYC on that day by querying the Weather Data dataset: / + azdata_cell_guid="aa7087b1-fb8d-43cc-a580-afa1ff6d9741" SELECT AVG(windspeed) AS avg_windspeed, MIN(windspeed) AS min_windspeed, MAX(windspeed) AS max_windspeed, AVG(temperature) AS avg_temperature, MIN(temperature) AS min_temperature, MAX(temperature) AS max_temperature, AVG(sealvlpressure) AS avg_sealvlpressure, MIN(sealvlpressure) AS min_sealvlpressure, MAX(sealvlpressure) AS max_sealvlpressure, AVG(precipdepth) AS avg_precipdepth, MIN(precipdepth) AS min_precipdepth, MAX(precipdepth) AS max_precipdepth, AVG(snowdepth) AS avg_snowdepth, MIN(snowdepth) AS min_snowdepth, MAX(snowdepth) AS max_snowdepth FROM OPENROWSET( BULK 'https://azureopendatastorage.blob.core.windows.net/isdweatherdatacontainer/ISDWeather/year=*/month=*/*.parquet', FORMAT='PARQUET' ) AS [weather] WHERE countryorregion = 'US' AND CAST([datetime] AS DATE) = '2016-01-23' AND stationname = 'JOHN F KENNEDY INTERNATIONAL AIRPORT' / + [markdown] azdata_cell_guid="be7f6d54-0301-4fbd-be89-56472649e6f9" / The results of the query indicate that the drop in the number of taxi rides occurred because: / / - There was a blizzard on that day in NYC with heavy snow (~30 cm). / - It was cold (temperature was below zero degrees Celsius). / - It was windy (~10 m/s). / / This tutorial has shown how a data analyst can quickly perform exploratory data analysis, easily combine different datasets by using serverless Synapse SQL pool, and visualize the results by using Azure Data Studio. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CVXOPT # ## Task 1 # Machine Learning tasks are typically thought of optimization problems, e.g. minimizing an error function or maximizing a probability. Ideally, the optimization problem turns out to be convex, which implies that any local minimum is the global minimum of the formulation, and what is even more important, we can. In the following, it will be assumed that you have some basic knowledge about convex optimization. The intention of this task is to familiarize ourselves with CVXOPT, one of the most-widely used convex optimization toolboxes. # a) Go to `cvxopt.org` and follow the installation instructions for your distribution. 
For conda, you need to run # `conda install -c conda-forge cvxopt` # b) Skim through the **Examples** section on `cvxopt.org` to get an overview of the functionality of the different solvers of CVXOPT. from cvxopt import matrix, solvers import numpy as np # c) Implement a function `minsq` which expects a NumPy array `A` of shape `(m,n)` and a NumPy array `y` of shape `(m,)` as its arguments and returns a NumPy array `x` of shape `(n,)` that solves the following problem. #
# $\min_{\mathbf{x}} \|\mathbf{A}\mathbf{x}-\mathbf{y}\|$.
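#
# Note that the minimizer of $\|\mathbf{A}\mathbf{x}-\mathbf{y}\|$ is also the minimizer of $\tfrac{1}{2}\mathbf{x}^T\mathbf{A}^T\mathbf{A}\mathbf{x}-(\mathbf{A}^T\mathbf{y})^T\mathbf{x}$, which is exactly the $\tfrac{1}{2}\mathbf{x}^TP\mathbf{x}+q^T\mathbf{x}$ form that `solvers.qp` expects, with $P=\mathbf{A}^T\mathbf{A}$ and $q=-\mathbf{A}^T\mathbf{y}$.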
    # # Test your function by feeding it with appropriate inputs and comparing the results with the ones you get by using `np.linalg.pinv`. Experiment by adding white Gaussian noise to `y`. If CVXOPT does not accept your NumPy arrays, try casting them to `double`. # + def minsq(A, y): P=matrix(np.dot(A.T,A).astype('double')) q=matrix(-np.dot(A.T,y).astype('double')) x=solvers.qp(P,q) return np.array(x['x']) A=np.array([[10, 40],[20, 0],[-30, 40]]) y=np.array([50,20,10])+np.random.randn(3,) print('A:', A) print('y:', y) print('x:', minsq(A,y).squeeze()) print('np.dot(pinv(A),y):', np.dot(np.linalg.pinv(A),y)) # - # d) Consider the equation (8.30) in the lecture notes. Implement a function `solvedualsvm(H,y)` that returns the solution `lambda_star` of the dual SVM problem by means of CVXOPT. def solvedualsvm(H,y): y=y.squeeze() n=len(y) G=-np.eye(n).astype('double') A=y.reshape(1,n).astype('double') h=np.zeros((n,)).astype('double') b=np.zeros((1,)).astype('double') P=H.astype('double') q=-np.ones((n,)).astype('double') lambda_star=solvers.qp(matrix(P),matrix(q),matrix(G),matrix(h),matrix(A),matrix(b)) return lambda_star['x'] # Test your function with the training data # \begin{equation*} # \begin{split} # \mathbf{x}_1=\begin{bmatrix}-1\\-1\end{bmatrix},y_1=-1,&\ \mathbf{x}_2=\begin{bmatrix}-2\\-2\end{bmatrix},y_2=-1,\\ # \mathbf{x}_3=\begin{bmatrix}1\\1\end{bmatrix},y_3=1,&\ \mathbf{x}_4=\begin{bmatrix}2\\2\end{bmatrix},y_4=1,\ # \end{split}. # \end{equation*} # Verify that the KKT conditions with respect to the support vectors are in line with what you expect. In the next lab course, we will use this function to implement linear and kernel SVM. X=np.array([[-1,-2,1,2],[-1,-2,1,2]]) y=np.array([-1,-1,1,1]) H=np.dot(np.dot(np.dot(np.diag(y),X.T),X),np.diag(y)) lambda_star=solvedualsvm(H,y) print(lambda_star) # Only the KKT coefficients that belong to the support vectors are significantly larger than 0. 
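# A possible numerical check (a sketch using the variables defined above): recover the
# primal solution from `lambda_star` via the standard hard-margin relations
# $\mathbf{w}=\sum_i \lambda_i y_i \mathbf{x}_i$ and $b=y_s-\mathbf{w}^T\mathbf{x}_s$ for a
# support vector $\mathbf{x}_s$, and confirm that the margins $y_i(\mathbf{w}^T\mathbf{x}_i+b)$
# are close to 1 exactly for the support vectors. The 1e-5 cut-off used to decide which
# multipliers count as support vectors is an arbitrary choice.

# +
lam = np.array(lambda_star).flatten()       # CVXOPT matrix -> 1-D NumPy array
w = np.sum(lam * y * X, axis=1)             # columns of X are the training points
sv = np.where(lam > 1e-5)[0]                # indices of the numerical support vectors
b = np.mean(y[sv] - np.dot(w, X[:, sv]))    # averaged over support vectors for stability
print('support vectors:', sv)
print('w =', w, ', b =', b)
print('margins y_i*(w.x_i + b):', y * (np.dot(w, X) + b))
# -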
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from itertools import cycle import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import lasso_path, enet_path from sklearn import datasets diabetes = datasets.load_diabetes() X = diabetes.data y = diabetes.target X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter) # Compute paths eps = 5e-5 # the smaller it is the longer is the path # + print("Computing regularization path using the lasso...") alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False) print("Computing regularization path using the positive lasso...") alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path( X, y, eps, positive=True, fit_intercept=False) # Display results plt.figure(1) colors = cycle(['b', 'r', 'g', 'c', 'k']) neg_log_alphas_lasso = -np.log10(alphas_lasso) neg_log_alphas_enet = -np.log10(alphas_enet) for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and Elastic-Net Paths') plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left') plt.axis('tight') plt.figure(2) neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso) for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and positive Lasso') plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left') plt.axis('tight') plt.figure(3) neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet) for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors): l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c) l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Elastic-Net and positive Elastic-Net') plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'), loc='lower left') plt.axis('tight') plt.show() # + print("Computing regularization path using the elastic net...") alphas_enet, coefs_enet, _ = enet_path( X, y, eps=eps, l1_ratio=0.8, fit_intercept=False) print("Computing regularization path using the positive elastic net...") alphas_positive_enet, coefs_positive_enet, _ = enet_path( X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False) # + # Display results plt.figure(1) colors = cycle(['b', 'r', 'g', 'c', 'k']) neg_log_alphas_lasso = -np.log10(alphas_lasso) neg_log_alphas_enet = -np.log10(alphas_enet) for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and Elastic-Net Paths') plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left') plt.axis('tight') plt.figure(2) neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso) for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, 
linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and positive Lasso') plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left') plt.axis('tight') plt.figure(3) neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet) for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors): l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c) l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Elastic-Net and positive Elastic-Net') plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'), loc='lower left') plt.axis('tight') plt.show() # - # # l1_ratio=0.4 # # + l1_ratio=0.4 print("Computing regularization path using the elastic net...") alphas_enet, coefs_enet, _ = enet_path( X, y, eps=eps, l1_r=l1_ratio, fit_intercept=False) print("Computing regularization path using the positive elastic net...") alphas_positive_enet, coefs_positive_enet, _ = enet_path( X, y, eps=eps, l1_r=l1_ratio, positive=True, fit_intercept=False) # Display results plt.figure(1) colors = cycle(['b', 'r', 'g', 'c', 'k']) neg_log_alphas_lasso = -np.log10(alphas_lasso) neg_log_alphas_enet = -np.log10(alphas_enet) for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and Elastic-Net Paths') plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left') plt.axis('tight') plt.figure(2) neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso) for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and positive Lasso') plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left') plt.axis('tight') plt.figure(3) neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet) for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors): l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c) l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Elastic-Net and positive Elastic-Net') plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'), loc='lower left') plt.axis('tight') plt.show() # - eps print(neg_log_alphas_enet) x=0 for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): x=x+1 print(c) print(x) print(coef_e, c ) coefs_lasso.shape alphas_enet.shape, coefs_enet.shape # + eps = 5e-3 # the smaller it is the longer is the path # - alphas_enet.shape, coefs_enet.shape X.shape y.shape # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CH 4. Arrays and Vectorized Computation # ## 1. Import Package import numpy as np import matplotlib.pyplot as plt from random import normalvariate import random my_list=list(range(1000000)) my_arr=np.arange(1000000) # %time for _ in range(10) : my_list*2 # %time for _ in range(10) : my_arr*2 # ## 2. 
Creating ndarray arr=np.random.randn(2,3) arr # + print(arr*10) print(arr+arr) print("shape of data :",arr.shape) print("data type of data :",arr.dtype) print("type of data :" ,type(arr)) # - data1=[6,7.5,8,0,1] arr1=np.array(data1) arr1 data2=[[1,2,3,4],[5,6,7,8]] arr2=np.array(data2) arr2 print("arr1 :",arr1.ndim) print("arr2 :",arr2.ndim) print("arr1 :",arr1.dtype) print("arr2 :",arr2.dtype) # ## 3. dtypes for ndarray # + arr1=np.array([1,2,3],dtype=np.float64) arr2=np.array([1,2,3],dtype=np.int32) print(arr1.dtype) print(arr2.dtype) # - arr=np.array([1,2,3,4,5]) arr.dtype float_arr=arr.astype(np.float64) float_arr.dtype arr=np.array([3.7,-1.2,-2.6,0.5,12.9,10.1]) arr.astype(np.int32) numeric_strings=np.array(['1.25','-9.6','42'],dtype=np.string_) numeric_strings.astype(float) # ## 4. Arithmetic with numpy arrays arr=np.array([[1,2,3],[4,5,6]]) arr print(arr*arr) print(arr-arr) print(1/arr) print(arr**0.5) arr2=np.array([[0,4,1],[7,2,12]]) arr2 print(arr2>arr) # ## 5. Basic indexing and slicing # 1 dimensional arr=np.arange(10) arr print(arr[5]) print(arr[5:8]) arr[5:8]=12 arr arr_slice=arr[5:8] arr_slice arr_slice[1]=12345 arr arr_slice[:]=64 arr # 2 dimensional arr2d=np.array([[1,2,3],[4,5,6],[7,8,9]]) arr2d print(arr2d[0]) print(arr2d[0][2]) print(arr2d[0,2]) # 3 dimensional arr3d=np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]) arr3d print(arr3d[0]) print(arr3d[0][1]) print(arr3d[0][1][0]) # selection with slicing arr print(arr[1:6]) arr2d print(arr2d[:2]) print(arr2d[:2,1:]) print(arr2d[1,:2]) # ## 6. Boolean Indexing names=np.array(['Bob','Joe','Will','Bob','Will','Joe','Joe']) data=np.random.randn(7,4) print(names) print(data) print("[1]", names=='Bob') print("[2]", data[names=='Bob']) print("[3]", data[names=='Bob',2:]) print("[4]", data[names=='Bob',3]) print("[5]", names!='Bob') print("[6]", data[~(names=='Bob')]) cond = names=='Bob' data[~cond] mask = (names=='Bob') | (names=='Will') mask data[mask] data[data<0]=0 data data[names !='Joe']=7 data # ## 7. Fancy Indexing arr = np.empty((8,4)) arr for i in range(8): arr[i]=i arr print(arr[[4,3,0,6]]) print(arr[[-3,-5,-7]]) arr = np.arange(32).reshape((8,4)) arr print(arr[[1,5,7,2],[0,3,1,2]]) print(arr[[1,5,7,2]][:,[0,3,1,2]]) arr = np.arange(15).reshape((3,5)) print(arr) print(arr.T) arr=np.random.randn(6,3) print(arr) print(np.dot(arr.T,arr)) # ## 8. Universal Function : Fast element-wise array function arr=np.arange(10) print(arr) print(np.sqrt(arr)) print(np.exp(arr)) x=np.random.randn(8) y=np.random.randn(8) print(x) print(y) print(np.maximum(x,y)) print(np.minimum(x,y)) # ## 9. Array-oriented programming with arrays points = np.arange(-5,5,0.01) xs,ys=np.meshgrid(points,points) print(xs) print(ys) z = np.sqrt(xs**2+ys**2) z # + plt.imshow(z, cmap=plt.cm.gray);plt.colorbar() plt.title("Image plot of $\sqrt{x^2+y^2}$ for a gird of values") plt.show() # - # ## 10. Expressing Conditional Logic as array operations xarr = np.array([1.1,1.2,1.3,1.4,1.5]) yarr = np.array([2.1,2.2,2.3,2.4,2.5]) cond = np.array([True,False,True,True,False]) result=[(x if c else y) for x,y,c in zip(xarr,yarr,cond)] result result_=np.where(cond,xarr,yarr) result_ arr=np.random.randn(4,4) arr print(arr>0) print(np.where(arr>0,2,-2)) print(np.where(arr>0,2,arr)) # ## 11. 
Mathematical and Statistical Methods # + arr=np.random.randn(5,4) print(arr) print(arr.mean()) print(np.mean(arr)) print(arr.sum()) print(arr.mean(axis=1)) print(arr.sum(axis=1)) # + arr = np.array([0,1,2,3,4,5,6,7]) print(arr) print(arr.cumsum()) # + arr = np.array([[0,1,2],[3,4,5],[6,7,8]]) print(arr) print(arr.cumprod(0)) print(arr.cumprod(axis=0)) # - # ## 12. Method for Boolean Arrays # + arr = np.random.randn(100) print(arr) print((arr>0).sum()) # + bools = np.array([False,False,True,False]) print(bools.any()) print(bools.all()) # - # ## 13. Sorting arr = np.random.randn(6) arr arr.sort() arr arr=np.random.randn(5,3) arr arr.sort(1) arr large_arr = np.random.randn(1000) large_arr.sort() large_arr[int(0.05*len(large_arr))] # ## 14. Unique and other Set logics names=np.array(['Bob','Joe','Will','Bob','Will','Joe','Joe']) np.unique(names) ints = np.array([3,3,3,2,2,1,1,4,4]) np.unique(ints) sorted(set(names)) values = np.array([6,0,0,3,2,5,6]) np.in1d(values,[2,3,6]) # ## 15. File Input and Output with Arrays arr = np.arange(10) np.save('some_array',arr) np.load('some_array.npy') np.savez('array_archive.npz',a=arr,b=arr) arch=np.load('array_archive.npz') arch['b'] np.savez_compressed('arrays_compressed.npz',a=arr,b=arr) # ## 16. Linear Algebra # + x = np.array([[1,2,3],[4,5,6]]) y = np.array([[6.,23.],[-1,7],[8,9]]) print(x) print(y) # - x.dot(y) # np.dot(x,y) np.dot(x,np.ones(3)) #x@np.ones(3) # ## 17. Random samples=np.random.normal(size=(4,4)) samples N=1000000 # %timeit samples =[normalvariate(0,1) for _ in range(N)] # %timeit np.random.normal(size=N) # ## 18. Example position = 0 walk = [position] steps = 1000 for i in range(steps) : step = 1 if random.randint(0,1) else -1 position += step walk.append(position) plt.plot(walk[:100]) plt.show() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 9.6.0 准备皮卡丘数据集 # + import os import json from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt from mxnet.gluon import utils as gutils # pip install mxnet from mxnet import image data_dir = '../../data/pikachu' os.makedirs(data_dir, exist_ok=True) # - # ## 1. 下载原始数据集 # 见http://zh.d2l.ai/chapter_computer-vision/object-detection-dataset.html # + def _download_pikachu(data_dir): root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/' 'gluon/dataset/pikachu/') dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8', 'train.idx': 'dcf7318b2602c06428b9988470c731621716c393', 'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'} for k, v in dataset.items(): gutils.download(root_url + k, os.path.join(data_dir, k), sha1_hash=v) if not os.path.exists(os.path.join(data_dir, "train.rec")): print("下载原始数据集到%s..." % data_dir) _download_pikachu(data_dir) # - # ## 2. 
MXNet数据迭代器 def load_data_pikachu(batch_size, edge_size=256): # edge_size:输出图像的宽和高 train_iter = image.ImageDetIter( path_imgrec=os.path.join(data_dir, 'train.rec'), path_imgidx=os.path.join(data_dir, 'train.idx'), batch_size=batch_size, data_shape=(3, edge_size, edge_size), # 输出图像的形状 # shuffle=False, # 以随机顺序读取数据集 # rand_crop=1, # 随机裁剪的概率为1 min_object_covered=0.95, max_attempts=200) val_iter = image.ImageDetIter( path_imgrec=os.path.join(data_dir, 'val.rec'), batch_size=batch_size, data_shape=(3, edge_size, edge_size), shuffle=False) return train_iter, val_iter batch_size, edge_size = 1, 256 train_iter, val_iter = load_data_pikachu(batch_size, edge_size) batch = train_iter.next() batch.data[0][0].shape, batch.label[0][0].shape # ## 3. 转换成PNG图片并保存 def process(data_iter, save_dir): """batch size == 1""" data_iter.reset() # 从头开始 all_label = dict() id = 1 os.makedirs(os.path.join(save_dir, 'images'), exist_ok=True) for sample in tqdm(data_iter): x = sample.data[0][0].asnumpy().transpose((1,2,0)) plt.imsave(os.path.join(save_dir, 'images', str(id) + '.png'), x / 255.0) y = sample.label[0][0][0].asnumpy() label = {} label["class"] = int(y[0]) label["loc"] = y[1:].tolist() all_label[str(id) + '.png'] = label.copy() id += 1 with open(os.path.join(save_dir, 'label.json'), 'w') as f: json.dump(all_label, f, indent=True) process(data_iter = train_iter, save_dir = os.path.join(data_dir, "train")) process(data_iter = val_iter, save_dir = os.path.join(data_dir, "val")) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import intake catalog = intake.open_catalog("./catalog.yml") open_data = catalog.la_open_data service_requests = open_data.search('Small Cell') for entry_id, entry in service_requests.items(): display(entry) small_cells = service_requests['https://data.lacity.org/api/views/3nrm-mq6k'].read() small_cells.head() # ## Making a Map using Geopandas # # Now that we have a dataframe, we can turn it into a Geodataframe and plot it using geopandas. # # However, this is pretty hard to use as a map. 
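# If the export carries coordinate columns, the table can also be given explicit point geometry before plotting. This is only a sketch of the idea: the column names `latitude` and `longitude` below are assumptions and may differ in the actual dataset.

# +
import geopandas as gpd

# Hypothetical column names -- adjust to whatever the actual export uses.
small_cells_points = gpd.GeoDataFrame(
    small_cells,
    geometry=gpd.points_from_xy(small_cells['longitude'], small_cells['latitude']),
    crs='EPSG:4326',
)
small_cells_points.plot(markersize=2)
# -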
import geopandas as gpd from ipyleaflet import Map, GeoData, basemaps, LayersControl gdf = gpd.GeoDataFrame(small_cells) gdf.plot() # # Point Map using ipyleaflet # # Using ipyleaflet, we can add a geodataframe layer m = Map(center=(33.8711,-117.8628), zoom = 9, basemap= basemaps.Esri.WorldTopoMap) small_cell_layer = GeoData(geo_dataframe = gdf, name = 'Small Cells') m.add_layer(small_cell_layer) m # # Heatmap, instead # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import numpy as np import easyocr import matplotlib.pyplot as plt # %matplotlib inline Image=r'C:\Users\divek\Desktop\sample.jpg' def recognize_text(img_path): '''loads an image and recognizes text.''' reader = easyocr.Reader(['en']) return reader.readtext(img_path) result = recognize_text(Image) result img = cv2.imread(Image) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(img) def extract_text(img_path): '''loads an image, recognizes text, and overlays the text on the image.''' # loads image img = cv2.imread(img_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) dpi = 80 fig_width, fig_height = int(img.shape[0]/dpi), int(img.shape[1]/dpi) plt.figure() f, axarr = plt.subplots(1,2, figsize=(fig_width, fig_height)) axarr[0].imshow(img) # recognize text result = recognize_text(img_path) # if OCR prob is over 0.5, overlay bounding box and text for (bbox, text, prob) in result: if prob >= 0.5: # display print(f'Detected text: {text} (Probability: {prob:.2f})') # get top-left and bottom-right bbox vertices (top_left, top_right, bottom_right, bottom_left) = bbox top_left = (int(top_left[0]), int(top_left[1])) bottom_right = (int(bottom_right[0]), int(bottom_right[1])) # create a rectangle for bbox display cv2.rectangle(img=img, pt1=top_left, pt2=bottom_right, color=(255, 0, 0), thickness=10) # put recognized text cv2.putText(img=img, text=text, org=(top_left[0], top_left[1] - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0), thickness=8) # show axarr[1].imshow(img) extract_text(Image) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Load contact matrices from Wuhan and tailor it to the camp of interest import pandas as pd import numpy as np wuhan_matrix=pd.read_csv('Contact_matrix_wuhan.csv',index_col=0) moria_params=pd.read_csv('moria_params.csv',index_col=0) moria_params_pop=moria_params[moria_params['Variable'] == 'Population_structure'] moria_params_pop_value=moria_params_pop.Value.values contact_matrix=wuhan_matrix.values contact_matrix.shape # + # we have 1-16 in 5 year bands age_limits=np.array([0,10,20,30,40,50,60,70,80]) n_categories=len(age_limits)-1 ind_limits=(age_limits/5).astype(int) p=[] for cc in range(n_categories): for i in range(ind_limits[cc],ind_limits[cc+1]): p.append(moria_params_pop_value[cc]/(ind_limits[cc+1]-ind_limits[cc])) M=np.zeros((n_categories,n_categories)) for rr in range(n_categories): for cc in range(n_categories): V2=0 sump=sum(p[(ind_limits[cc]+1):ind_limits[cc+1] ])*sum(p[(ind_limits[rr]+1):ind_limits[rr+1]]) for i in range(ind_limits[cc]+1,ind_limits[cc+1]): for j in range(ind_limits[rr]+1,ind_limits[rr+1]): V2+=contact_matrix[j-1,i-1]*p[i-1]*p[j-1]/sump M[rr,cc] = V2 # - col_names=[] for i in range(M.shape[0]): 
col_names.append('Age '+str(age_limits[i])+'-'+str(age_limits[i+1])) moria_contact_dict={} for col,index in zip(col_names,list(range(n_categories))): moria_contact_dict[col]=M[:,index] moria_contact_matrix=pd.DataFrame.from_dict(moria_contact_dict) moria_contact_matrix.to_csv('moria_contact_matrix.csv') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="pyPei1w4ptMB" # # Assignment-8 # # Author: (201011057) # + id="gKsTi1lvrW7F" import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.layers import Input, Dense, Dropout, Embedding, LSTM, Bidirectional from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences # + colab={"base_uri": "https://localhost:8080/"} id="edoiBo0FqVoF" outputId="e6310e8c-a7f5-44f1-a207-c2107f32d345" # downloading the dataset # !gdown https://drive.google.com/uc?id=18l6IwSqavnqtLQpVnrRqOugZf9XkhEAN # + colab={"base_uri": "https://localhost:8080/"} id="9ATcJ49uqzGF" outputId="6889e126-d798-4e3c-9983-71d0430356ec" # unzipping # !unzip /content/jigsaw-toxic-comment-classification-challenge.zip # !unzip /content/train.csv.zip # + id="fdhLsfERtExn" # train data df = pd.read_csv("/content/train.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 255} id="9gMJmhBrtMEv" outputId="6abc4136-b097-48cd-cca4-395e8d47d8b2" df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 289} id="N5syG1EPtPlx" outputId="712870b2-e1f8-40bf-f4a5-6efa25367f72" labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] # adding 'none' label for a scenario where record is classified in neither of the given classes df['none'] = 1 - df[labels].max(axis=1) df.head() # + id="2P422quIZfQg" df['none'] = 1 - df[labels].max(axis=1) # filling null with token df['comment_text'].fillna('',inplace=True) # + id="QqgCXYFwaEKx" # updated labels labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate', 'none'] # + id="r9fdDdlfaJcC" # maximum sequence length of input (i.e. 
every sequence will be padded upto this length) max_sequence_length = 128 # tokenizer - to split text sequence into tokens tokenizer = Tokenizer() tokenizer.fit_on_texts(df['comment_text']) sequences = tokenizer.texts_to_sequences(df['comment_text']) # to pad the sequences upto max length pad_sequences = pad_sequences(sequences, maxlen=max_sequence_length) # + id="IP_CnGdLbKcn" def build_model(vocab_size, max_sequence_length, n_class): input = Input(shape=(max_sequence_length,)) # step-1: calculating embedding of input sequenes # transforms: (batch_size x max_seq_len) -> (batch size x max_seq_len x embedding_dim) # step-2: applying bidirectional lstm # transforms: (batch_size x 2*16) x = Bidirectional(LSTM(16))(Embedding(input_dim=vocab_size, output_dim=64, input_length=max_sequence_length, embeddings_initializer='random_normal')(input)) x = Dropout(0.5)(x) # step-3: dense layer: 32 -> 16 x = Dense(16, activation='relu')(x) # step-4: dense layer: 16 -> 7 output = Dense(n_class, activation='sigmoid')(x) model = tf.keras.Model(inputs=input, outputs=output) # loss: binary cross entropy # optimizer: Adam model.compile (optimizer='adam', loss="binary_crossentropy", metrics=['accuracy', 'Precision', 'Recall']) return model # + colab={"base_uri": "https://localhost:8080/"} id="JrjgTZdTb_qL" outputId="50b2f102-ced4-4d44-af27-b295b317fa88" vocab_size = len(tokenizer.word_index) + 1 n_class = len(labels) model = build_model(vocab_size, max_sequence_length, n_class) model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="MIUWgSLScEX6" outputId="4eda0464-e127-4cd0-ecce-13d99bdb85ab" batch_size = 1024 epochs = 20 # fitting the model history = model.fit(pad_sequences, y=df[labels], batch_size=batch_size, epochs=epochs, validation_split=0.1, shuffle = True ) # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="Z17qdhCqf8w9" outputId="9998b38e-d29b-4ffe-9a8d-f267a6fc5d31" # plotting training and validation accuracy plt.plot(history.history['accuracy'], label='Training Accuracy') plt.plot(history.history['val_accuracy'], label='Validation Accuracy') plt.title('Training and Validation Accuracy') plt.xlabel('Epoch') plt.ylabel('Metric Value') plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="tA9uXW8cgR9S" outputId="4802ed36-8420-44c8-e0b3-6b708ff68465" # plotting training and validation precision plt.plot(history.history['precision'], label='Training Precision') plt.plot(history.history['val_precision'], label='Validation Precision') plt.title('Training and Validation Precision') plt.xlabel('Epoch') plt.ylabel('Metric Value') plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="qKuwMZiQhPGx" outputId="97065b0b-9eaf-4f30-a4a6-6f990e4de326" # plotting training and validation recall plt.plot(history.history['recall'], label='Training Recall') plt.plot(history.history['val_recall'], label='Validation Recall') plt.title('Training and Validation Recall') plt.xlabel('Epoch') plt.ylabel('Metric Value') plt.legend() plt.show() # + id="nxdpbTrHipQ4" # defining f1 score def f1(prec, recall): return (2 * prec * recall) / (prec + recall) # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="x6gDmyPvEjd1" outputId="8f500c5b-c902-4240-b0ec-455855b9dbec" # final scores pd.DataFrame(dict({ 'precision': [history.history["precision"][-1], history.history["val_precision"][-1]], 'recall': [history.history["recall"][-1], history.history["val_recall"][-1]], 'f1': 
[f1(history.history["precision"][-1],history.history["recall"][-1]), f1(history.history["val_precision"][-1],history.history["val_recall"][-1])], }), index = ['train', 'validation']) # --- # title: "Loop over maps, numpy arrays" # date: 2020-04-12T14:41:32+02:00 # author: "" # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Loop over maps # + # Definition of dictionary europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo', 'italy':'rome', 'poland':'warsaw', 'austria':'vienna' } # Iterate over europe for k,v in europe.items(): print('the capital of ' + str(k) +' is ' + str(v)) # - # ### Loop over 1D Numpy array import numpy as np np_height = [1.2,32,23.2,23.1] # For loop over np_height for x in np_height: print(x) # ### Loop over 2D Numpy array import numpy as np baseball = [[180, 78.4], [215, 102.7], [210, 98.5], [188, 75.2]] np_baseball = np.array(baseball) # flatten, printing rows sequentially for x in np.nditer(np_baseball): print(x) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.2 64-bit # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import pyspike as spk spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", edges=(0, 4000)) spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1]) x, y = spike_profile.get_plottable_data() plt.plot(x, y, '--k') print("SPIKE distance: %.8f" % spike_profile.avrg()) plt.show() # - # # 1. Spike train object # - an object that stores spike train. # - note that spike train just consists of many timepoints. # - A SpikeTrain object consists of # - the **spike times** given as numpy arrays # - as well as the **edges** of the spike train as [t_start, t_end]. 
# # The following code creates such a spike train with some arbitrary spike times: # + import numpy as np from pyspike import SpikeTrain spike_train = SpikeTrain(np.array( [[0.1, 0.3, 0.2, 0.4]] ), [0.0, 1.0] ) # - # spike_train spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", edges=(0, 4000)) avrg_isi_profile = spk.isi_profile(spike_trains) avrg_spike_profile = spk.spike_profile(spike_trains) avrg_spike_sync_profile = spk.spike_sync_profile(spike_trains) spk.spike_sync(spike_trains[0], spike_trains[1]) # d # !pip install python-louvain # !pip install networkx # !conda install -n base ipykernel --update-deps --force-reinstall import networkx as nx import matplotlib.pyplot as plt import numpy as np import copy from networkx.algorithms import community import matplotlib.animation as animation Add_Inner_edges(10,1) # + def Add_Inner_edges(range, num): inner_edges = [] while len(inner_edges) < num: tmp = np.sort(np.random.choice(range, size=2, replace=None)).tolist() tmp += [np.random.uniform(0, 1)] # random weight new_edge = tuple(tmp) if new_edge not in inner_edges: inner_edges = inner_edges + [new_edge] return inner_edges def Add_Outer_edges(Community_all, num): # 두 커뮤니티 선택 outter_edges = [] while len(outter_edges) < num: #group_choiced = np.random.choice(range(len(Community_all)), size=2, replace=None) # 범용적으로 커뮤니티 선택할 시 if len(outter_edges) < 3: group_choiced = np.random.choice([0, 1], size=2, replace=None) elif len(outter_edges) < 6: group_choiced = np.random.choice([0, 2], size=2, replace=None) elif len(outter_edges) < 10: group_choiced = np.random.choice([1, 2], size=2, replace=None) tmp = np.sort([np.random.choice(Community_all[group_choiced[0]], replace=None), np.random.choice(Community_all[group_choiced[1]], replace=None)]).tolist() tmp += [np.random.uniform(0, 1)] # random weight new_edge = tuple(tmp) if new_edge not in outter_edges: outter_edges = outter_edges + [new_edge] return outter_edges """ Network 생성 """ # 총 150명 G = nx.Graph() G.add_nodes_from(range(150)) # Community 설정 3개의 커뮤니티 Community1 = range(0, 40) Community2 = range(40, 90) Community3 = range(90, 150) Community_all = Community1, Community2, Community3 # 내부 연결 추가 Inner_edges_1 = Add_Inner_edges(range=Community1, num=100) # Community 1 100개 Inner_edges_2 = Add_Inner_edges(range=Community2, num=150) # Community 1 150개 Inner_edges_3 = Add_Inner_edges(range=Community3, num=200) # Community 1 200개 G.add_weighted_edges_from(Inner_edges_1) G.add_weighted_edges_from(Inner_edges_2) G.add_weighted_edges_from(Inner_edges_3) # 외부 연결 추가 Outter_edges = Add_Outer_edges(Community_all, 50) # Community 1-2-3 간에 50개 G.add_weighted_edges_from(Outter_edges) pos = nx.spring_layout(G) # - int(3.0) # + import community as lvcm """ Louvain method """ partition = lvcm.best_partition(graph=G, partition=None, weight='weight', resolution=1., randomize=True) max_k_w = [] for com in set(partition.values()): list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com] max_k_w = max_k_w + [list_nodes] """ Make Community Color list """ community_num_group = len(max_k_w) color_list_community = [[] for i in range(len(G.nodes()))] for i in range(len(G.nodes())): for j in range(community_num_group): if i in max_k_w[j]: color_list_community[i] = j # + """ Plot Community """ fig, ax = plt.subplots() edges = G.edges() weights = [G[u][v]['weight'] for u, v in edges] Feature_color_sub = color_list_community node_size = 50 d = dict(G.degree) nx.draw_networkx_nodes( G = G, pos = pos, nodelist=d.keys(), node_size=[v * 100 
for v in d.values()], node_color=Feature_color_sub, cmap='jet', vmin=0, vmax=community_num_group, ax = ax ) nx.draw_networkx_edges( G = G, pos = pos, edge_color = weights, edge_cmap = plt.cm.Blues, ax = ax ) # - fig.savefig('save.png') d.keys() print(ax) # + plt.xticks([]) plt.yticks([]) plt.colorbar(im) plt.show(block=False) # - import networkx as nx import matplotlib.pyplot as plt import numpy as np import copy from networkx.algorithms import community import matplotlib.animation as animation import community as lvcm import scipy import pickle import pandas as pd # + import networkx as nx import matplotlib.pyplot as plt import numpy as np import copy from networkx.algorithms import community import matplotlib.animation as animation import community as lvcm import scipy import pickle import pandas as pd #1. create network. distMat = np.load("score_matrix.npy") print(distMat) positions = pd.read_pickle("positions.pkl") print(positions) n_nodes = distMat.shape[1] # number of nodes = number of channels G = nx.Graph() # We set the electrode as a node (e.g., circles in the network map) channel_list = ["channel{}".format(int(i)) for i in positions[0]] print(channel_list) G.add_nodes_from(channel_list) pos = {} for i in range(len(channel_list)): x_now = positions.iloc[i, 1] y_now = positions.iloc[i, 2] position_now = [int(x_now), int(y_now)] pos[channel_list[i]] = position_now print(pos[channel_list[1]]) # we set the degree of synchronization between the electrodes as an edge # (e.g., lines in the network map). for i in range(n_nodes): for j in range(i+1, n_nodes): sync_score = distMat[i,j] # the links with synchronized scores less than 0.5 were filtered out. if sync_score >= 1/2: G.add_edge(i, j, weight = sync_score) # Louvain method partition = lvcm.best_partition(graph=G, partition=None, random_state = 1) max_k_w = [] for com in set(partition.values()): list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com] max_k_w = max_k_w + [list_nodes] # list comprehension. concat [member list] of each community # Make Community Color list community_num_group = len(max_k_w) color_list_community = [[] for i in range(len(G.nodes()))] # list comprehension. 
empty list of list for i in range(len(G.nodes())): for j in range(community_num_group): if i in max_k_w[j]: color_list_community[i] = j # Plot Community fig, ax = plt.subplots() edges = G.edges() weights = [G[u][v]['weight'] for u, v in edges] Feature_color_sub = color_list_community d = dict(G.degree) print(d.keys()) node_size_normalized = np.fromiter(d.values(), dtype = float) print(node_size_normalized) node_size_normalized = (node_size_normalized - np.min(node_size_normalized))/(np.max(node_size_normalized) - np.min(node_size_normalized)) nx.draw_networkx_nodes( G=G, pos = pos, nodelist=d.keys(), node_size = 30 * (1 + node_size_normalized), node_color=Feature_color_sub, cmap='jet', vmin=0, vmax=community_num_group, ax = ax ) edges = nx.draw_networkx_edges( G = G, pos = pos, edge_color = weights, width = 1.5, edge_cmap = plt.cm.Blues, ax = ax) # %pyrun("plt.xticks([])") # %pyrun("plt.yticks([])") plt.colorbar(edges) # %pyrun("plt.show(block=False)") fig.savefig('networkfig.png') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from udntools.utils import DFSDictByDistance from udntools.region import ServiceRegion import matplotlib.pyplot as plt service_region = ServiceRegion(0, 100, 0, 100, 100, 1000, if_fix_bs=True) dfs_3 = DFSDictByDistance(service_region.bs_position_, 3.0) dfs_5 = DFSDictByDistance(service_region.bs_position_, 5.0) dfs_7 = DFSDictByDistance(service_region.bs_position_, 7.0) dfs_10 = DFSDictByDistance(service_region.bs_position_, 10.0) def get_comp_number(region): count = 0 for key, values in region.cluster_set_.items(): num = np.size(values) if num > 1: count += num return count # + import matplotlib params = {'axes.labelsize': 15,'axes.titlesize':10, 'text.fontsize': 15, 'legend.fontsize': 15, 'xtick.labelsize': 15, 'ytick.labelsize': 15} matplotlib.rcParams.update(params) # 图片大小 fig = plt.figure(figsize=(13, 13)) # 使能中文字体 # -*- coding:utf-8 -*- plt.subplot(221) plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签 plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 dfs_3.dfs_plot(service_region.bs_position_) plt.xlim(0, 100) plt.ylim(0, 100) plt.xlabel("X(m)", size=13) plt.ylabel("Y(m)", size=13) plt.grid(True) plt.title("(a) $\tau=3$",fontproperties = 'SimHei', size=15) plt.subplot(222) dfs_5.dfs_plot(service_region.bs_position_) plt.xlim(0, 100) plt.ylim(0, 100) plt.xlabel("X(m)", size=13) plt.ylabel("Y(m)", size=13) plt.title("(b) $\tau=5$",fontproperties = 'SimHei', size=15) plt.grid(True) plt.subplot(223) dfs_7.dfs_plot(service_region.bs_position_) plt.xlim(0, 100) plt.ylim(0, 100) plt.xlabel("X(m)", size=13) plt.ylabel("Y(m)", size=13) plt.title("(c) $\tau=7$",fontproperties = 'SimHei', size=15) plt.grid(True) plt.subplot(224) dfs_10.dfs_plot(service_region.bs_position_) plt.xlim(0, 100) plt.ylim(0, 100) plt.xlabel("X(m)", size=13) plt.ylabel("Y(m)", size=13) plt.title("(d) $\tau=10$",fontproperties = 'SimHei', size=15) plt.grid(True) # 存储成为pdf fig.savefig('dfs_network_show.pdf') fig.savefig('dfs_network_show.png') plt.show() # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 4: Geospatial Data Visualization # # In this lab, you will analyze the geographical distribution of the population 
of the State of Illinois and its temporal changes from 1980 to 2010. You will find that most of the tasks were covered in the lecture; this lab is largely a reproduction of those tasks. The difference is the study area (Illinois instead of Texas), and we focus on temporal changes of the general population instead of COVID-19 cases. # # The data for this lab was obtained from the following resources. # * County geometry: https://www.census.gov/geographies/mapping-files/time-series/geo/tiger-line-file.2010.html # * Temporal changes of the population: http://www.idph.state.il.us/health/census1980_2010.htm # # ## Structure # ### 1. Data Manipulation (0.5 point) # In order to visualize the geospatial data, you first need to obtain the data properly (i.e., as a GeoDataFrame). Here, with the help of `GeoPandas`, let's create a GeoDataFrame as shown below.
    # # # ### 2. A Single Choropleth Map (1.5 point) # Here, you will create a single Choropleth map with the merged `county` GeoDataFrame from the previous task. The result should look like the map below.
    # # # ### 3. Multiple Choropleth Map (2 point) # # Here, you will make a `figure` with four `axes`, and each of the `axes` will have a Choropleth map of the population from 1980 to 2010. The result should look like the map below. # # # ### 4. Creating a Web Map with GeoPandas and Folium (1 point) # Here, you will be making an interactive web map with GeoPandas and Folium by using the `gpf.explore()` method. The result web map should look like this. # # ## Notes: # **Before you submit your lab, make sure everything runs as expected WITHOUT ANY ERROR.**
    # **Make sure you fill in any place that says `YOUR CODE HERE` or `YOUR ANSWER HERE`:** FULL_NAME = "" # Import necessary packages import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import matplotlib.patheffects as pe import mapclassify from matplotlib.colors import LinearSegmentedColormap import numpy as np # ## 1. Data Manipulation (0.5 point) # In order to visualize the geospatial data, you need to obtain data (i.e., GeoDataFrame), properly. Here, with the help of `GeoPandas`, let's create a GeoDataFrame as shown below.
    # # # # **1.1.** (0.25 point) Import two datasets (`illinois_county.shp` and `population_chage.csv`) in the `data` folder with the names of `county` and `pop`, respectively. Use GeoPandas for `illinois_county.shp`, given it has geometry. Use Pandas for `population_chage.csv` given it doesn't have geometry. # # **1.2.** (0.25 point) Investigate the contents to find the shared information between the two DataFrames and **merge `pop` to `county`**. You can use either the indexes to merge or the name, since they have a column with the same name. Take a look at this website for your reference. # + # Your code here # + """ Test code for the previous code. This cell should NOT give any errors when it is run.""" # Task 1.1. assert type(county) == gpd.GeoDataFrame assert type(pop) == pd.DataFrame # Task 1.2. assert county.shape == (102, 7) print('Success!') # - # ## 2. A Single Choropleth Map (1.5 point) # # Here, you will create a single Choropleth map with the merged `county` GeoDataFrame from the previous task. The result should look like the map below.
    # # # # # **2.1.** (0.25 point) Initiate a plot with `plt.subplots()`. Specify the figure size as 10 by 20, and **Note** that you are making a single map.
    # **2.2.** (0.5 point) Create a Choropleth map based on the population in 2010 (i.e., `Year2010` column in `county` GeoDataFrame). Use the following attributes for a specific style. # * Colormap (`cmap`): 'Blues' # * Classification scheme (`scheme`): 'FisherJenks' # * Number of classes (`k`): 5 # # **2.3.** (0.5 point) Create boundaries of the county polygons with `gpd.boundary` and plot it over the Choropleth map, above. Use the following attributes for a specific style. # * Color (`color`): 'black' # * Line width (`linewidth`): 0.5 # * Line style (`linestyle`): 'dotted' # # **2.4.** (0.25 point) Fill in the missing information (i.e., `NAME OF YOUR DATAFRAME` and `NAME OF A COLUMN`) from the code below for annotating each county with its `GEOID` column.
    # *GEOIDs* are numeric codes that uniquely identify all administrative/legal and statistical geographic areas for which the Census Bureau tabulates data.
    # # ```python # for idx, row in [`NAME OF YOUR DATAFRAME`].iterrows(): # Iterate everyrow in a GeoDataFrame # ax.text(s=row[`NAME OF A COLUMN`], # String to be displayed # x=row['geometry'].centroid.coords[:][0][0], # X coordinate of label # y=row['geometry'].centroid.coords[:][0][1], # Y coordinate of label # fontsize=10, # color='white', # ha='center', # Horizontal align # va='center', # Vertical align # path_effects=[pe.withStroke(linewidth=2, foreground="black")] # ) # ``` # # + # Your code here # - # ## 3. Multiple Choropleth Map (2 point) # # Here, you will make a `figure` have four `axes`, and each of the `axes` will have a Choropleth map of the population from 1980 to 2010. The result should look to the map below. # # # ### 3.1. Create a customized color map (0.5 point) # **3.1.1.** Visit `ColorBrewer` and grab hex color codes of `YlGnBu` color with `5` classes as you can see on the screen capture below. # # # **3.1.2.** (0.25 point) Save the hex code as a list, `color_brewer`. Note that you need to make each hex code as a string.
    # ```python # color_brewer = ['#hexcode1', '#hexcode2', '#hexcode3'..] # ``` # **3.1.3.** (0.25 point) Feed the list to `LinearSegmentedColormap.from_list()` method and create an instance of color bar with the name of `cm`. When you call `cm`, the following color bar should pop up. Note that you need to use attribute `N=5` to make this ColorMap has only five colors. # # + # Your code here # Task 3.1.1. # https://colorbrewer2.org/#type=sequential&scheme=YlGnBu&n=5 # + """ Test code for the previous code. This cell should NOT give any errors when it is run.""" # Task 3.1. assert type(color_brewer) == list assert type(cm) == LinearSegmentedColormap assert cm.N == 5 print('Success!') # - # ### 3.2. Define Map Classes with `mapclassify` (0.5 point). # # **3.2.1.** (0.25 point) Create a Dataframe, named `pop_change` by slicing `county` GeoDataFrame. The DataFrame `pop_change` will have 102 rows (i.e., the number of counties) and four columns (`Year1980`, `Year1990`, `Year2000`, `Year2010`) granted from `county` GeoDataFrame. The code below will help you with this task. # ```python # pop_change = county[['List of Columns']] # ``` # **3.2.2.** (0.25 point) Feed `pop_change` DataFrame to `mapclassify.FisherJenks()` method and save the instance to `map_class` variable. This will provide the classes that can be used regardless of columns (`Year1980`, `Year1990`, `Year2000`, `Year2010`). # # + # Your code here # + """ Test code for the previous code. This cell should NOT give any errors when it is run.""" def array_comparison(arr1, arr2): comparison = arr1 == arr2 equal_arrays = comparison.all() return equal_arrays # Task 3.2. assert pop_chage.columns.to_list() == ['Year1980', 'Year1990', 'Year2000', 'Year2010'] assert type(map_class) == mapclassify.classifiers.FisherJenks assert array_comparison(map_class.bins, np.array([81625, 201081, 516418, 916924, 5376741])) print('Success!') # - # ### 3.3. Creating a Figure with Multiple Axes (1 point) # # **3.3.1.** (0.25 point) Create `fig` and `axes` with `plt.subplots(nrows=[number needed], ncols=[number needed], figsize=(15, 10))`. As we have four columns (`Year1980`, `Year1990`, `Year2000`, `Year2010`) to be displayed, we want to have **1** row and **4** columns.
    # **3.3.2.** (0.5 point) Populate each of the axes with the population distribution of each decennial. You can modify and provide proper variables to the following two lines of the code. Note that you can answer this question in two ways: 1) Copy and paste the codes four times or 2) take advantage of a for loop. # # ```python # county.plot(`NAME OF A COLUMN`, # ax=`LOCATION ON THE AXES`, # cmap=cm, # This will use the colormap defined in the previous task # scheme='user_defined', # This use the classification defined in the previous task # classification_kwds={'bins': map_class.bins} # This use the classification defined in the previous task # ) # county.boundary.plot(color='black', linewidth=0.5, linestyle=':', ax=`LOCATION ON THE AXES`) # ``` # **3.3.3.** (0.25 point) Set the title of each of the axes with different name. For example, the first plot can be named as 'Population at Year 1980'. # + # Your code here # - # ## 4. Creating a Web Map with GeoPandas and Folium (1 point) # # Here, you will be making an interactive web map with GeoPandas and Folium by using `gpf.explore()` method. The result web map should look like this.
    # In detail, the web map should be colored based on the population in 2010, with the color map of `Blues`. In addition, the counties should be classified into 7 groups based on `FisherJenks`. When you hover the mouse over counties, the name of a county should be appeared. Also, upon clicking, the pop-up will show the population from 1980 to 2010. # # **4.1.** Modify the code provided below, and create a web map based on the information in the GeoDataFrame `county`. # ```python # m = county.explore(column=`STRING NEEDED`, # You will display the population in 2010. # cmap=`STRING NEEDED`, # You will use colormap named 'Blues'. # scheme=`STRING NEEDED`, # You will use `FisherJenks` algorithm for the classification. # k=7,# This is the number of classes. # popup=`LIST OF VALUES NEEDED`, # The pop up will show the population from 1980 to 2010. # tooltip=`STRING NEEDED` # Hovering mouse will display the name of a county. # ) # ``` # **4.2.** Save the web map into the data folder as the name of `webmap_YOUR_NET_ID.html`. # + # Your code here # - # ### *You have finished Lab 4: Geospatial Data Visualization* # Please name your jupyter notebook as `GEOG489_Lab4_[YOUR_NET_ID].ipynb`, and upload it to https://learn.illinois.edu. # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Extended Technology Application # ## *Maximum Sustainable Harvest* # In certain situations, biologists are able to determine what is called a *reproduction curve*. This is a function # # > $y = f(P)$ # # such that if $P$ is the population after $P$ years, then $f(P)$ is the population a year later, at time $t + 1$. Such a curve is shown below. #   The line $y = P$ is significant because if it ever coincides with the curve $f(P)$, then we know that the population stays the same from year to year. Here the graph of $f$ lies mostly above the line, indicating that the population is increasing. #   Too many deer in a forest can deplete the food supply and eventually cause the population to decrease for lack of food. Often in such cases and with some controversy, hunters are allowed to "harvest" some of the deer. Then with a greater food supply, the remaining deer population might actually prosper and increase. #   We know that a population $P$ will grow to a population $f(P)$ in a year. If this were a population of fur-bearing animals and the population were increasing, then hunters could "harvest" the amount # # > $f(P) - P$ # # each year without shrinking the initial population $P$. If the population were remaining the same or decreasing, then such a harvest would deplete the population. #   Suppose that we want to know the value of $P_0$ that would allow the harvest to be the largest. If we could determine that $P_0$, we could let the population grow until it reached that level and then begin harvesting year after year the amount $f(P_0) - P_0$. #   Let the harvest function $H$ be given by # # > $H(P) = f(P) - P$. # # Then # > $H'(P) = f'(P) - 1$. # # Now, if we assume that $H'(P)$ exists for all values of $P$ and that there is only one critical value, it follows that the *maximum sustainable harvest* occurs at that value $P_0$ such that # # > $H'(P_0) = f'(P_0) - 1 = 0$ # # and # > $H''(P_0) = f''(P_0) < 0$. # # Or, equivalently, we have the following. 
# > `THEOREM` # > --- # > The **maximum sustainable harvest** occurs at $P_0$ such that # > # >     $f'(P_0) = 1$ and $f''(P_0) < 0$, # > # > is given by # > # >     $H(P_0) = f(P_0) - P_0$. # ## EXERCISES # # For each reproduction curve in Exercises 1–3, do the following. # # 1. Graph the reproduction function, the line $y = P$, and the harvest function using the same viewing window. # 2. Find the population at which the maximum sustainable harvest occurs. Use both a graphical solution and a calculus solution. # 3. Find the maximum sustainable harvest. # ### 1. $f(P) = P(10 - P)$, where $P$ is measured in thousands. # #### 1. Graphing f <- function (P) P * (10 - P) y <- function (P) P H <- function (P) f(P) - P exercise1.plot <- function (xlim.max = 15, ylim.max = 30) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise1.plot() # #### 2. Find population at which maximum sustainable harvest occurs # # ##### Graphical approach: exercise1.plot() max <- optimize(H, c(0, 100), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) print(max) # ##### Calculus approach: # # > $f(P) = P(10 - P)$ # > $f'(P) = 10 - 2P$ # > $H(P) = f(P) - P$ # > $H'(P) = f'(P) - 1$ # > $H'(P) = 10 - 2P - 1$ # > $H'(P) = -2P + 9$ # > $0 = -2P + 9$ # > $\color{blue}{P = 4.5}$ # + df <- function (P) 10 - (2 * P) exercise1.plot() a <- max$maximum[1] max.x <- 15 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 3, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 0.8, (f(a) - y(a)) - a * 0.75, "Maximum sustainable harvest", col = 6) print(f(a)) # - # ### 2. $f(P) = -0.025P^2 + 4P$, where $P$ is measured in thousands. This is the reproduction curve in the Hudson Bay area for the snowshoe hare, a fur-bearing animal. # #### 1. Graphing f <- function (P) (-0.025 * P ^ 2) + (4 * P) y <- function (P) P H <- function (P) f(P) - P exercise2.plot <- function (xlim.max = 175, ylim.max = 175) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise2.plot() # #### 2. Find population at which maximum sustainable harvest occurs # # ##### Graphical approach: exercise2.plot() max <- optimize(H, c(0, 100), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) print(max) # ##### Calculus approach: # # > $f(P) = -0.025P^2 + 4P$ # > $f'(P) = -0.05P + 4$ # > $H(P) = f(P) - P$ # > $H'(P) = f'(P) - 1$ # > $H'(P) = -0.05P + 4 - 1$ # > $H'(P) = -0.05P + 3$ # > $0 = -0.05P + 3$ # > $\color{blue}{P = 60}$ # #### 3. 
Maximum sustainable harvest # + df <- function (P) -0.05 * P + 4 exercise2.plot() a <- max$maximum[1] max.x <- 175 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 5, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 0.65, (f(a) - y(a)) + a * 0.5, "Maximum sustainable harvest", col = 6) print(f(a)) # - # ### 3. $f(P) = -0.01P^2 + 2P$, where $P$ is measured in thousands. This is the reproduction curve in the Hudson Bay area for the lynx, a fur-bearing animal. # #### 1. Graphing f <- function (P) (-0.01 * P ^ 2) + (2 * P) y <- function (P) P H <- function (P) f(P) - P exercise3.plot <- function (xlim.max = 200, ylim.max = 150) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise3.plot() # #### 2. Find population at which maximum sustainable harvest occurs # # ##### Graphical approach: exercise3.plot() max <- optimize(H, c(0, 100), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) print(max) # ##### Calculus approach: # # > $f(P) = -0.01P^2 + 2P$ # > $f'(P) = -0.02P + 2$ # > $H(P) = f(P) - P$ # > $H'(P) = f'(P) - 1$ # > $H'(P) = -0.02P + 2 - 1$ # > $H'(P) = -0.02P + 1$ # > $0 = -0.02P + 1$ # > $\color{blue}{P = 50}$ # #### 3. Maximum Sustainable Harvest # + df <- function (P) -0.02 * P + 2 exercise3.plot() a <- max$maximum[1] max.x <- 200 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 5, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 0.95, (f(a) - y(a)) + a * 0.85, "Maximum sustainable harvest", col = 6) print(f(a)) # - # For each reproduction curve in Exercises 4 and 5, do the following. # # 1. Graph the reproduction function, the line $y = P$, and the harvest function using the same viewing window. # 2. Graphically determine the population at which the maximum sustainable harvest occurs. # 3. Find the maximum sustainable harvest. # ### 4. $f(P) = 40\sqrt{P}$, where $P$ is measured in thousands. Assume that this is the reproduction curve for the brown trout population in a large lake. # f <- function (P) (40 * sqrt(P)) y <- function (P) P H <- function (P) f(P) - P exercise4.plot <- function (xlim.max = 2400, ylim.max = 2400) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise4.plot() # #### 2. 
Find population at which maximum sustainable harvest occurs exercise4.plot() max <- optimize(H, c(0, 1500), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) print(max) # + df <- function (P) (20 / sqrt(P)) exercise4.plot() a <- max$maximum[1] max.x <- 2400 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 5, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 1.35, (f(a) - y(a)) + a * 0.75, "Maximum sustainable harvest", col = 6) print(f(a)) # - # ### 5. $f(P) = 0.237P\sqrt{2000 - P^2}$, where $P$ is measured in thousands. f <- function (P) (0.237 * P) * (sqrt((2000 - (P ^ 2)))) y <- function (P) P H <- function (P) f(P) - P exercise5.plot <- function (xlim.max = 60, ylim.max = 240) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise5.plot() # #### 2. Find population at which maximum sustainable harvest occurs exercise5.plot() max <- optimize(H, c(0, 44), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) print(max) # > $f(P) = 0.237P\sqrt{2000 - P^2}$ # > $f(P) = 0.237P(2000-P^2)^{\frac{1}{2}}$ # > $u = (2000-P^2)^{\frac{1}{2}}$ # > $u' = {\frac{-P}{\sqrt{2000-P^2}}}$ # > $f'(P) = 0.237P \cdot u' + u \cdot 0.237$ # > $f'(P) = 0.237P \cdot {\frac{-P}{\sqrt{2000 - P^2}}} + \sqrt{2000 - P^2} \cdot 0.237$ # > $f'(P) = \frac{-0.237P^2}{\sqrt{2000 - P^2}} + 0.237\sqrt{2000 - P^2}$ # > $f'(P) = \frac{-0.237P^2 + 0.237(2000 - P^2)}{\sqrt{2000 - P^2}}$ # > $f'(P) = \frac{-0.474P^2 + 474}{\sqrt{2000 - P^2}}$ # + df <- function (P) ((-0.474 * P ^ 2 + 474) / sqrt(2000 - P ^ 2)) exercise5.plot() a <- max$maximum[1] max.x <- 60 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 5, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 0.45, (f(a) - y(a)) - a, "Maximum sustainable harvest", col = 6) print(f(a)) # - # ### 6. The table below lists data regarding the reproduction of a certain animal. # Population (in thousands) current.population <- c(10.0, 20.0, 30.0, 40.0, 50.0) next.population <- c(09.7, 23.1, 37.4, 46.2, 42.6) # #### a. Use REGRESSION to fit a cubic polynomial to these data. model <- lm(next.population ~ current.population + I(current.population ^ 2) + I(current.population ^ 3)) print(model) # #### b. Graph the reproduction function, the line $y = P$, and the harvest function using the same viewing window. 
f <- function (P) model$coefficients[1] + model$coefficients[2] * P + model$coefficients[3] * P ^ 2 + model$coefficients[4] * P ^ 3 y <- function (P) P H <- function (P) f(P) - P exercise6.plot <- function (xlim.max = 60, ylim.max = 60) { plot(f, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 2, lwd = 2, xlab = expression("P"), ylab = expression("f(P)")) # red plot(y, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 3, lwd = 2, add = TRUE) # green plot(H, xlim = c(0, xlim.max), ylim = c(0, ylim.max), col = 4, lwd = 2, add = TRUE) # blue grid(lty = 2) legend("topright", legend = c(expression(f), expression(y), expression(H)), col = c(2, 3, 4), lty = c(1, 1, 1) ) } exercise6.plot() # #### c. Graphically determine the population at which the maximum sustainable harvest occurs. # + df <- function (P) -0.003324 * P ^ 2 + 0.143072 * P - 0.03381 exercise6.plot() max <- optimize(H, c(0, 45), maximum = TRUE) points(max$maximum, max$objective, col = 4, lwd = 2) a <- max$maximum[1] max.x <- 60 points(a, f(a), col = 2, lwd = 2) # Draw tangent slope <- function (x) df(a) * (x - a) + f(a) lines(c(0, max.x), c(slope(0), slope(max.x)), lty = 5) text(a - a / 10, f(a) + a / 5, "m = 1") # Draw Maximum Sustainable Harvest lines(c(a, a), c(0, y(a)), lty = 5) lines(c(a, a), c(y(a), f(a)), lwd = 2, col = 6) text(a + a * 0.4, (f(a) - y(a)) + a * 0.9, "Maximum sustainable harvest", col = 6) print(f(a)) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import io import re import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns from collections import Counter import missingno import warnings warnings.filterwarnings('ignore') # - #read data, specify index so it's easier to join and search using loc path = '../../data/new-york-city-airbnb-open-data/' listings_csv = os.path.join(path,'selected_columns_listings.csv') df = pd.read_csv(listings_csv) df.columns df.describe() df.drop(columns=['listing_url','name','host_name','host_url','host_url', 'host_since','host_location','host_listings_count', 'host_listings_count','street','state','market','calculated_host_listings_count','calculated_host_listings_count_entire_homes', 'calculated_host_listings_count_private_rooms','calculated_host_listings_count_shared_rooms' ], inplace=True) host_count = df['host_id'].value_counts() fil=df['host_total_listings_count']>1 df[fil].head() df['host_id'].value_counts() df.groupby('host_total_listings_count').size().sort_values(ascending = False)[fil] host_count_20=host_count[:35,] plt.figure(figsize=(10,5)) sns.barplot(host_count_20.index, host_count_20.values, alpha=0.8) plt.title('Number of Listing per host- Top 35') plt.ylabel('Number of Occurrences', fontsize=12) plt.xlabel('Host_id', fontsize=12) plt.xticks(rotation=50) plt.show() host_count = host_count[:1000] plt.figure(figsize=(10,5)) sns.distplot(host_count,color='g') plt.title('Number of Listing per host') plt.ylabel('Number of listing per host', fontsize=12) plt.xlabel('Host_id', fontsize=12) plt.show() missingno.bar(df, labels=True) missingno.matrix(df) df.isna().sum() #visualization for type of room sns.catplot(x='room_type', kind='count' ,data=df) fig = plt.gcf() fig.set_size_inches(5, 5) #visualization for neighbourhood sns.catplot(x='neighbourhood_group_cleansed', kind='count' ,data=df) fig = plt.gcf() fig.set_size_inches(5, 5) 
df['price'] = pd.to_numeric(df.price.str.replace('$',''),errors='coerce') df['host_response_rate'] = pd.to_numeric(df.host_response_rate.str.replace('%',''),errors='coerce') df['host_acceptance_rate'] = pd.to_numeric(df.host_acceptance_rate.str.replace('%',''),errors='coerce') df['host_is_superhost']=df.host_is_superhost.replace(['t','f'],[1,0]) df['host_identity_verified']=df.host_identity_verified.replace(['t','f'],[1,0]) df['is_location_exact']=df.is_location_exact.replace(['t','f'],[1,0]) df['security_deposit'] = pd.to_numeric(df.security_deposit.str.replace('$',''),errors='coerce') df['cleaning_fee'] = pd.to_numeric(df.cleaning_fee.str.replace('$',''),errors='coerce') df['extra_people'] = pd.to_numeric(df.extra_people.str.replace('$',''),errors='coerce') df['instant_bookable']=df.instant_bookable.replace(['t','f'],[1,0]) df.host_response_rate.fillna(df.host_response_rate.mean(), inplace=True) df.host_acceptance_rate.fillna(df.host_acceptance_rate.mean(), inplace=True) df.host_is_superhost.fillna(df.host_is_superhost.mean(), inplace=True) df.host_identity_verified.fillna(df.host_identity_verified.mean(), inplace=True) df.is_location_exact.fillna(df.is_location_exact.mean(), inplace=True) df.security_deposit.fillna(df.security_deposit.mean(),inplace=True) df.cleaning_fee.fillna(df.cleaning_fee.mean(),inplace=True) df.extra_people.fillna(df.extra_people.mean(),inplace=True) #Drop other type of bed - keep just "Real Bed" df=df.set_index("bed_type") df.drop(["Airbed","Couch","Futon","Pull-out Sofa"], axis=0,inplace=True) #Drop Hotel Room from room_type df=df[df.room_type != 'Hotel room'] df.reset_index(drop=True, inplace=True) pd.crosstab(df.neighbourhood_group_cleansed,df.room_type,margins=True) g_1 = sns.catplot(x="price", y="room_type", hue="neighbourhood_group_cleansed", data=df, height=5, kind="bar", palette="RdBu") g_1.despine(left=False) g_1.set_ylabels("Room_type") pd.set_option('display.max_columns', 500) pd.set_option('display.max_rows', 500) corr=df.corr() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # OOP in Python # # OOP stands for Object-Oriented Programming. As it relates to this course, we will discuss OOP in the context of Python including the obvious keyword 'class' but also the overall organization of Python. # ## Class # # The fundamental component of object-oriented programming is the class. In Python a class is created using the keyword 'class'. Below is an example of the simplest possible class in Python. # + class MyClass: pass # To instantiate the class use the class name and open/closed parenthesis MyClass() # To assign (or create a reference to an instance) use a variable and the # equal sign mc = MyClass() print(mc) # - # What is the type of or mc? print(type(mc)) # What is the type of MyClass? print(type(MyClass)) # What does MyClass contain dir(MyClass) # ## Construction # # An important part of creating classes in OOP languages is commonly referred to as construction. In Python, construction has two distinct parts implemented by two special methods: # # * __new__ # * __init__ # # The first special method, __new__, is where the object is actual created. All Python classes inherit implicitly from a based class called object. The __new__ function takes care of creating the underlying implementation. 
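# To make the two-step construction concrete, here is a minimal sketch (the class name Widget is purely illustrative) that overrides both special methods just to show the order in which they run; as noted below, you will rarely need to override __new__ yourself.

# +
class Widget:
    def __new__(cls, *args, **kwargs):
        # __new__ receives the class and returns the newly allocated, empty instance
        print('__new__ called')
        return super().__new__(cls)

    def __init__(self, value):
        # __init__ receives the instance created by __new__ and initializes its attributes
        print('__init__ called')
        self.value = value

w = Widget(42)   # prints '__new__ called' then '__init__ called'
print(w.value)   # 42
# -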
#
# **Note: Prior to Python 3, it was necessary to explicitly include object in a class's inheritance.**
#
# It is possible to write your own __new__ function for a class, but it is extremely rare. 99.99% of the time, you will only define an __init__ function for a class. However, Python does give you access to and control over the lower level of instantiation through __new__.
#
# Also, note that there is no 'new' keyword.
#
# The second special method, __init__, is where you can initialize the attributes of a Python class. Let's implement an __init__ method, add some attributes and initialize them.

# +
class MyClass:
    def __init__(self):
        self.a = 10
        self.b = 'hello'
        self.c = ['a', 'b', 'c']

my = MyClass()
print(my.a, my.b, my.c)

my.a += 1
my.b += ' world'
my.c += 'd'
print(my.a, my.b, my.c)
# -

# **Note: For those wondering about the analog to construction (destruction): all object lifetimes in Python, including those created by instantiating a class, are handled by the garbage collector. So, we don't have to worry about it. In the words of Forrest Gump, "That's good, one less thing."**

# ## Self
#
# An important thing to notice above is the use of 'self' as a parameter to __init__ and as the reference for accessing the attributes a, b, c. 'self' is a reference to the current instance. It is equivalent to 'this' in C++, C# and Java. Smalltalk also uses 'self'. Think of 'self' as a reference to the instance data that is passed to the class functions so they operate on the correct data.

# ## Initialization
#
# Notice above that the values of the attributes a, b, c are initialized in __init__. We'd like to be able to initialize the instance when we declare the variable. Since __init__ is a function, it accepts arguments, and that is how we initialize the instance attributes.

# +
class MyClass:
    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

mc1 = MyClass(10, 'hello', ['a', 'b', 'c'])
mc2 = MyClass(20, 'world', ['d', 'e', 'f'])

print(mc1.a, mc1.b, mc1.c)
print(mc2.a, mc2.b, mc2.c)

print(mc1)
print(mc2)
# -

# **Note: __init__ is a function, so it can accept all of the various types of arguments that any function can, e.g. positional, named, and keyword.**

# ## Representation
#
# You'll notice above that when we printed the class instances, mc1 and mc2, we didn't get particularly readable output. Python provides two ways to display a class in a more readable manner:
#
# * __repr__
# * __str__
#
# According to the official Python documentation, __repr__ computes the "official" string representation of an object, while __str__ computes the "informal" string representation of an object.
#
# I agree that is a subtle distinction. The intent of __repr__ is that its result can be used to re-create the object by passing it to the function eval(). Below is an example using the datetime object.
# +
import datetime

now = datetime.datetime.now()

# Prints the actual value
print(str(now))

# Prints a string that can be used to recreate the object
print(repr(now))

# Passing the output of repr into eval and assigning the result to now2
# creates a new object with the same value as now
now2 = eval(repr(now))
print(str(now2))
# -

# So, what's the 'take away'?
#
# * Implement __str__ to make objects readable and to generate output for the end user.
# * Implement __repr__ to generate code that reproduces the object and to generate output for developers.
#
# One other thing to note: if you provide a definition for __repr__ but do not provide __str__, Python will call __repr__ when str is invoked on the instance; however, the opposite is not true. If no __repr__ is defined, then the default representation is used.
#
# Let's add __str__ and __repr__ functions to our class.

# +
class MyClass:
    def __repr__(self):
        return 'MyClass({}, "{}", {})'.format(self.a, self.b, self.c)

    def __str__(self):
        return 'MyClass(a={}, b={}, c={})'.format(self.a, self.b, ';'.join(map(str, self.c)))

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

mc1 = MyClass(10, 'hello', [1, 2, 3, 4])
print(repr(mc1))
print(str(mc1))

mc2 = eval(repr(mc1))
print(mc2)
# -

# ## Methods
#
# While a class can be used to hold only data, most classes contain both data and functionality. There are three types of methods that can be declared as part of a class:
#
# * Instance
# * Class
# * Static

# ### Instance
#
# We have already seen some instance methods: __str__, __repr__, and __init__. These are 'special' methods provided by Python to extend/enhance the class. But we can also add our own methods.
#
# **Note: Because Python uses the convention \_\_xxx\_\_ for special methods, you should avoid using this convention for your own methods to avoid any language conflicts.**
#
# Instance methods are associated with and can only be called by an instance of the class.

# +
class MyClass:
    def __repr__(self):
        return 'MyClass({}, "{}", {})'.format(self.a, self.b, self.c)

    def __str__(self):
        return 'MyClass(a={}, b={}, c={})'.format(self.a, self.b, ';'.join(map(str, self.c)))

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def format(self, format_str='{}, {}, {}'):
        return format_str.format(self.a, self.b, ','.join(map(str, iter(self.c))))

mc1 = MyClass(10, 'hello', [1, 2, 3, 4])
print(mc1.format())
print(mc1.format('{:05d}<->{}<->{}'))
# -

# ### Class
#
# Class methods are methods associated with the class. They are typically meant to implement functionality that applies to all instances of the class. Class methods can be called using the class name, and they can also be called from class instances. To define a class method, a feature of Python called a decorator is used -- the @classmethod decorator. Any method 'decorated' with the @classmethod decorator becomes a class method.

# +
class MyClass:
    # Note: This is a class attribute that is accessible using the class name.
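    # count tracks how many instances have been created; __init__ calls instance_increment() to bump it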
count = 0 @classmethod def instance_increment(cls): cls.count += 1 def __repr__(self): return 'MyClass({}, "{}", {})'.format(self.a, self.b, self.c) def __str__(self): return 'MyClass(a={}, b={}, c={})'.format(self.a, self.b, ';'.join(map(str, self.c))) def __init__(self, a, b, c): self.a = a self.b = b self.c = c MyClass.instance_increment() def format(self, format_str='{}, {}, {}'): return format_str.format(self.a, self.b, ','.join(map(str, iter(self.c)))) for i in range(10): mc = MyClass(1, 'a', [1,]) print(MyClass.count) # - # ### Static # # Static methods are methods that are bound to a class but that do not use/access a class instance. Static methods are essentially functions that are scoped by the class name. They can be accessed using the class name or instance variable. # + class MyClass: @staticmethod def do_anything(x, y, z): print(x*y*z) # Note: This is a class attribute that is accessible using the class name. count = 0 @classmethod def instance_increment(cls): cls.count += 1 def __repr__(self): return 'MyClass({}, "{}", {})'.format(self.a, self.b, self.c) def __str__(self): return 'MyClass(a={}, b={}, c={})'.format(self.a, self.b, ';'.join(map(str, self.c))) def __init__(self, a, b, c): self.a = a self.b = b self.c = c MyClass.instance_increment() def format(self, format_str='{}, {}, {}'): return format_str.format(self.a, self.b, ','.join(map(str, iter(self.c)))) mc = MyClass(10, 'hello', [1, 2, 3, 4]) mc.do_anything(1, 2, 3) MyClass.do_anything(4, 5, 6) # - # ### Overloading # # Method overloading is the ability to use the same method name more than once but with different arguments. Python does not support method overloading. If the same method is defined more than once, the last definition will be used. # + class MyClass: def overload(self, a): print(a) def overload(self, a, b): print(a, b) mc = MyClass() mc.overload(1) # - # It is possible using the positional \* operator and/or keyword \*\* arguments to acheive similar results as overloading. # + class MyClass: def overload1(self, *args): print(*args) def overload2(self, **kwargs): print(kwargs.items()) mc = MyClass() mc.overload1(1) mc.overload1(1, 2) mc.overload2(a=1) mc.overload2(a=1, b=2) # - # ## Accessibility # # Python does not support access modifiers/specifiers for data or methods. This means that all data and methods accessible to all. While there is no strict enforcement of access, there are conventions for access. The general conventions for class are: # # * Public data and methods are lowercase and '_' separated # * Private data and methods have an '_' prepended # * Protected data and methods have a double '_' prepended # # **Note: data and methods with double '_' under go name mangling where an '_' and class name is prepended to the name. 
#
# **Note: the above conventions apply to both class-level and instance-level variables and data.**

# +
class MyClass:
    A = 10
    _A = 20
    __A = 30

    @classmethod
    def blah(cls):
        print(cls.blah.__name__)

    @classmethod
    def _blah(cls):
        print(cls._blah.__name__)

    @classmethod
    def __blah(cls):
        print(cls._MyClass__blah.__name__)

    def __init__(self):
        self.a = 100
        self._a = 200
        self.__a = 300

    def grok(self):
        print(self.grok.__name__)

    def _grok(self):
        print(self._grok.__name__)

    def __grok(self):
        print(self.__grok.__name__)

mc = MyClass()
mc.A = 20
print(mc.A, MyClass.A)
del mc.A
print(mc.A)

print(MyClass.A)
print(MyClass._A)
#print(MyClass.__A)
print(MyClass._MyClass__A)

MyClass.blah()
MyClass._blah()
MyClass._MyClass__blah()

mc.grok()
mc._grok()
mc._MyClass__grok()
# -

# ### Shadowing
#
# Changing the value of a class attribute through an instance creates an instance-level 'shadow' of the class attribute, and from then on the two values diverge (unless the instance attribute is deleted, which exposes the class attribute again).

# +
class MyClass:
    A = 10

print("Class variable: ", MyClass.A)

mc = MyClass()
mc.A
mc.A = 100
print("Shadow variable: ", mc.A, " Class variable: ", MyClass.A)

MyClass.A = 1000
print("Shadow variable: ", mc.A, " Class variable: ", MyClass.A)

del mc.A
print("Restored Class variable", mc.A, " Class variable: ", MyClass.A)
# -

# ## Dynamic
#
# Python is a dynamic language, so it shouldn't be surprising that classes are also dynamic. So, what does that mean? Well, in statically typed languages it is necessary to define all of the details of a type before it can be used -- including classes, because they are a type. Dynamic languages like Python allow types to change over time.
#
# Like nearly all types in Python, classes are backed by a dictionary. The attribute __dict__ holds the attributes of a class.

# +
class MyClass:
    def __init__(self):
        self.a = 10
        self.b = 20

    def add(self):
        return self.a + self.b

mc = MyClass()
print(mc.add())
print(mc.__dict__)
mc.c = 30
print(mc.__dict__)

# myclass.py
# define MyClass

# otherfile.py
# import myclass
# mc = myclass.MyClass()

# import myclass.MyClass
# mc = MyClass()

# from myclass import MyClass
# mc = MyClass()
# -

# **Note: Many ORM modules use the ability to dynamically create classes and attributes that match the data being received from a database or from a web page.**

# ## Inheritance
#
# Python supports class inheritance, including multiple inheritance. In the class definition, after the class name, a parameter list of base classes can be passed:

# +
class BaseClass:
    A = 10

class MyClass(BaseClass):
    pass

mc = MyClass()
print(mc.A)
# -

# ### Overriding
#
# Overriding is the ability to define the same method in a child class as has been defined in a base class. There are two reasons why overriding is used:
#
# * To replace the default behavior of the base class without changing the class semantics
# * To extend the functionality in the base class, either by calling the base class first and modifying the results, or by changing the inputs to the base class method to change the results.
# # # #### Replacing # + class BaseClass: def method(self): print("doing base method") class MyClass(BaseClass): def method(self): print("doing myclass method") mc = MyClass() mc.method() # - # #### Extending # + class BaseClass: def method1(self): print("doing base method1") def method2(self): print("doing base method2") class MyClass(BaseClass): def method1(self): super().method1() print("doing myclass method1") def method2(self): print("doing myclass method2") super().method2() mc = MyClass() mc.method1() mc.method2() # - # ## Accessors # # In Python, one can access the instance attributes directly using 'dot' notation. However, sometimes attributes are not simple variables and they must be derived, e.g., a calulcation or query on a database. Python provides a several methods for creating attribute accessors. # # The most ovbious accessor is a method. It is as simple as defining a method (or two) on the class, known as getter and setter methods. Typically, such methods include the words 'get' and 'set'. # + class MyClass: def __init__(self, a, b): self._a = a self._b = b def set_a(self, value): self._a = value def get_a(self): return self._a def set_b(self, value): self._b = value def get_b(self): return self._b mc = MyClass(1, 2) print(mc.get_a()) print(mc.get_b()) mc.set_a(2) mc.set_b(4) print(mc.get_a()) print(mc.get_b()) # - # While there is nothing particularly wrong with setter/getter methods, they expose a particular implementation choice. OOP purists would insist that the client should not have to know that an attribute is implemented as data vs a function. That's not an invalid point, by the way. To that point, Python has two main primary ways to define attributes or properties on a class: # # * property() # * @property # ### Property() # # The property() function is a builtin function that accepts four arguments: # # * fget - a function for getting an attribute value # * fset - a function for setting an attribute value # * fdel - a function for deleting an attribute value # * doc - creates a docstring for the attribute # # The result provides a way to hide the implementation of the property x. Access is as if it were an data attribute, but there is a transparent intervening function. # + class MyClass: def __init__(self): self._x = None def getx(self): print("this is getx") return self._x def setx(self, value): print("this is setx") self._x = value def delx(self): print("this is delx") del self._x x = property(getx, setx, delx, "I'm the 'x' property.") mc = MyClass() mc.x = 10 print(mc.x) del mc.x print(mc.x) # - # ### @property # # The other way to create properties on a class is to use the property decorator and the corresponding setter/deleter decorators. # + class MyClass: def __init__(self): self._x = None @property def x(self): """I'm the 'x' property.""" print('this is @property x') return self._x @x.setter def x(self, value): print('this is @x.setter') self._x = value @x.deleter def x(self): print('this is @x.deleter') del self._x mc = MyClass() mc.x = 10 print(mc.x) del mc.x print(mc.x) # - # ## Inheritance # # As an object-oriented language, Python supports inheritance. Python classes can inherit data (attributes) and behavior (methods) from the parent or base class. The syntax for inheriting is: # + class BaseClass: pass class DerivedClass(BaseClass): pass # - # Below is a basic class from which we will build an class hierarchy using inheritance. 
# + class Person: def __str__(self): return self.Name() def __init__(self, first_name, last_name): self._first_name = first_name self._last_name = last_name def Name(self): return self._first_name + ' ' + self._last_name def Income(self): return 0.0 p = Person('Tim', 'Slator') print(p) print(p.Name()) # - # ### Is-A Relationship # # It is important to understand what inheritance means. Inheritance implies a relationship between the derived (child) class and the base (parent) class. That relationship is described as 'is-a', as in, is a . # # For example, above we defined a Person class. Now, any class that derives from Person must be describable using the 'is-a' relationship. How about an employee? customer? a boss (its debatable whether your boss is a person)? Friend? Student? etc. # # **Note: No object-oriented language enforces these abstractions so it is up to you to do so when you program** # Let's create an Employee class that inherits from Person and adds an attribute of 'job'. # + class Employee(Person): def __str__(self): ''' Explanation: __str__ is implemented in Person. It prints the attributes of Person: first_name and last_name We want to print all attributes: first_name, last_name, and job There are a couple of ways to get first_name and last_name: 1. invoke the __str__ method in the super class 2. invoke the Name method in the super class 3. reimplement __str__ (copy-paste coding is not recommended) Note: Here we are extending (or building on) the existing functionality which is the whole point of Object-Oriented programming. ''' return super().__str__() + ', ' + str(self._job) #Alternate approach: #return super().Name() + ', ' + str(self._job) def __init__(self, first_name, last_name, job): ''' Explanation: __init__ is implemented in Person. It initializes the attributes of Person: first_name and last_name We want to initialize the Person class with first_name and last_name then initialize the Employee attribute job. Again, there are a couple of ways to do this: 1. invoke __init__ on the super class 2. set first_name and last_name directory ''' super().__init__(first_name, last_name) #Alternate: #self._first_name = first_name #self._last_name = last_name #Note: Not recommended to reimplement what is already in the base class. One advantage #of object-oriented programming is to leverage existing code. By extension, that also #allows leveraging existing testing. Reimplementing code that has already been tested #potentially impacts schedule and product quality. self._job = job def Job(self): ''' Explanation: Nothing to do wrt to inheritance, just return the job. ''' return self._job e = Employee('Tim', 'Slator', 'chief bottle washer (CBW)') print(e) print('{last_name}, {first_name} : {job}'.format(last_name=e._last_name, first_name=e._first_name, job=e._job)) print(e.Job()) # We get this from Employee class print(e.Name()) # We get this from Person class # Just to demonstrate that Person does not have a job attribute p = Person('Tim', 'Slator') print(p._job) # AttributeError generated # - # ## Overriding # # Above we saw an example of extending existing functionality by calling the base class method and then doing some additional work. Sometimes base classes have no implementation or a default implementation that you don't want. In that case, you can override the method, that is, hide the base class implementation, and provide you own implementation. We do that with the Income method. 
# + class Employee(Person): def __str__(self): return super().Name() + ', ' + str(self._job) + ': ' + str(self._salary) def __init__(self, first_name, last_name, job, salary): super().__init__(first_name, last_name) self._job = job self._salary = salary def Job(self): return self._job def Income(self): return self._salary e = Employee('Tim', 'Slator', 'chief bottle washer (CBW)', 'peanuts') print(e.Name()) print(e.Job()) print(e.Income()) print(e) # - # ## Multiple Inheritance # # # + class Vehicle: def __str__(self): return "Vehicle" class LandVehicle(Vehicle): def __str__(self): return "LandVehicle::" + super().__str__() class WaterVehicle(Vehicle): def __str__(self): return "WaterVehicle::" + super().__str__() class Car(LandVehicle): def __str__(self): return 'Car::' + super().__str__() class Boat(WaterVehicle): def __str__(self): return "Boat::" + super().__str__() class Hovercraft(LandVehicle, WaterVehicle): def __str__(self): return "Hovercraft::" + super().__str__() c = Car() b = Boat() hc = Hovercraft() print(c) print(b) print(hc) # - # ## Abstract Class # # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="LBk9dKhMXHgZ" # *CRIE UM PROGRAMA QUE LEIA UM NUMERO INTEIRO E MOSTRE NA TELA SE ELE E PAR OU IMPAR*. # + id="x49yuWh5XUC9" colab={"base_uri": "https://localhost:8080/"} outputId="bcdfba9a-16d3-4e9e-fa32-c910179829c5" numero = int(input('Digite um numero: ')) resultado = numero%2 if resultado ==0: print('O numero {} é par'.format(numero)) else: print('O numero {} é impar'.format(numero)) # + id="ZIkldApfhcuF" colab={"base_uri": "https://localhost:8080/"} outputId="97a9040f-7559-4b5d-f13d-6dd09df27fae" n=int(input('Digite um numero:')) resultado = n%2 if resultado==0: print('O numero {} é par'.format(n)) else: print('O numero {} é impar'.format(n)) # + colab={"base_uri": "https://localhost:8080/"} id="f3dgj5bQ2fHY" outputId="f6091b79-e843-43b8-e9bf-827deec2c60c" n=int(input('Digite um numero')) resultado=n%2 if resultado==0: print('O numero {} digitado é par'.format(n)) else: print('O numero {} digitado é impar'.format(n)) # + id="gISAJmSj4BN7" # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib,aplpy from astropy.io import fits from general_functions import * import matplotlib.pyplot as plt # + font = {'size' : 14, 'family' : 'serif', 'serif' : 'cm'} plt.rc('font', **font) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['lines.linewidth'] = 1 plt.rcParams['axes.linewidth'] = 1 #Set to true to save pdf versions of figures save_figs = True # - # The files used to make the following plot are: r_image_decals = 'HCG16_DECaLS_r_cutout.fits' grz_image_decals = 'HCG16_DECaLS_cutout.jpeg' obj_list = ['NW_clump','E_clump','S_clump'] # #+'_mom0th.fits' or +'_mom1st.fits' # 1. An $r$-band DECaLS fits image of HCG 16. # 2. A combined $grz$ jpeg image from DECaLS covering exactly the same field. # # These files were downloaded directly from the [DECaLS public website](http://legacysurvey.org/). The exact parameters defining the region and pixel size of these images is contained in the [pipeline.yml](pipeline.yml) file. # # 3. 
Moment 0 and 1 maps of each candidate tidal dwarf galaxy. # # The moment 0 and 1 maps of the galaxies were generated in the *imaging* step of the workflow using CASA. The exact steps are included in the [imaging.py](casa/imaging.py) script. The masks used to make these moment maps were constructed manually using the [SlicerAstro](http://github.com/Punzo/SlicerAstro) software package. They were downloaded along with the raw data from the EUDAT service [B2SHARE](http://b2share.eudat.eu) at the beginnning of the workflow execution. The exact location of the data are given in the [pipeline.yml](pipeline.yml) file. # Make moment 0 contour overlays and moment 1 maps. # + #Initialise figure using DECaLS r-band image f = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1]) #Display DECaLS grz image f.show_rgb(grz_image_decals) #Recentre and resize f.recenter(32.356, -10.125, radius=1.5/60.) #Overlay HI contours f.show_contour(data='NW_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0], colors='lime',levels=numpy.arange(0.1,5.,0.05)) #Add grid lines f.add_grid() f.grid.set_color('black') #Save if save_figs: plt.savefig('Fig15-NW_clump_mom0_cont.pdf') # + #Clip the moment 1 map mask_mom1(gal='NW_clump',level=0.1) #Initialise figure for clipped map f = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1]) #Recentre and resize f.recenter(32.356, -10.125, radius=1.5/60.) #Set colourbar scale f.show_colorscale(cmap='jet',vmin=3530.,vmax=3580.) #Add grid lines f.add_grid() f.grid.set_color('black') #Show and label colourbar f.add_colorbar() f.colorbar.set_axis_label_text('$V_\mathrm{opt}$ [km/s]') #Add beam ellipse f.add_beam() f.beam.set_color('k') f.beam.set_corner('bottom right') #Save if save_figs: plt.savefig('Fig15-NW_clump_mom1.pdf') # + #Initialise figure using DECaLS r-band image f = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1]) #Display DECaLS grz image f.show_rgb(grz_image_decals) #Recentre and resize f.recenter(32.463, -10.181, radius=1.5/60.) #Overlay HI contours f.show_contour(data='E_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0], colors='lime',levels=numpy.arange(0.1,5.,0.05)) #Add grid lines f.add_grid() f.grid.set_color('black') #Save if save_figs: plt.savefig('Fig15-E_clump_mom0_cont.pdf') # + #Clip the moment 1 map mask_mom1(gal='E_clump',level=0.1) #Initialise figure for clipped map f = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1]) #Recentre and resize f.recenter(32.463, -10.181, radius=1.5/60.) #Set colourbar scale f.show_colorscale(cmap='jet',vmin=3875.,vmax=3925.) #Add grid lines f.add_grid() f.grid.set_color('black') #Show and label colourbar f.add_colorbar() f.colorbar.set_axis_label_text('$V_\mathrm{opt}$ [km/s]') #Add beam ellipse f.add_beam() f.beam.set_color('k') f.beam.set_corner('bottom right') #Save if save_figs: plt.savefig('Fig15-E_clump_mom1.pdf') # + #Initialise figure using DECaLS r-band image f = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1]) #Display DECaLS grz image f.show_rgb(grz_image_decals) #Recentre and resize f.recenter(32.475, -10.215, radius=1.5/60.) 
#Overlay HI contours f.show_contour(data='S_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0], colors='lime',levels=numpy.arange(0.1,5.,0.05)) #Add grid lines f.add_grid() f.grid.set_color('black') #Save if save_figs: plt.savefig('Fig15-S_clump_mom0_cont.pdf') # + #Clip the moment 1 map mask_mom1(gal='S_clump',level=0.1) #Initialise figure for clipped map f = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1]) #Recentre and resize f.recenter(32.475, -10.215, radius=1.5/60.) #Set colourbar scale f.show_colorscale(cmap='jet',vmin=4050.,vmax=4100.) #Add grid lines f.add_grid() f.grid.set_color('black') #Show and label colourbar f.add_colorbar() f.colorbar.set_axis_label_text('$V_\mathrm{opt}$ [km/s]') #Add beam ellipse f.add_beam() f.beam.set_color('k') f.beam.set_corner('bottom right') #Save if save_figs: plt.savefig('Fig15-S_clump_mom1.pdf') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %pylab inline import gdr2_completeness.tap_queries as tap # + # Here we download the 130pc volume-complete GDR2mock sample used as CMD prior for nearby planetary hosts distance = 0.130 # Specifying the tap query Select_what = """ * """ under_condition = """AND 1/parallax < %f""" %(distance) folder = 'data/130pc/' hpx_level = 2 service = 'GDR2mock' tap.tap_query_gdr2_hpx_sliced(service = service, hpx_level = hpx_level, folder = folder, Select_what = Select_what, under_condition = under_condition, verbose = True, test_1st_hpx_only=False) # Then we combine the files to one structured array tap.stack_healpix_files(folder) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Emotion Recognition # Before diving into much of the code for detectioning emotion, # you must extraction features that can be used for classification of emotions. # The features we'll use will be derived from facial landmarks identified by dlib. # # You can download a trained facial shape predictor from dlib's site. 
# Do the following: # # ```bash # # cd ~/Downloads # wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 # bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2 # ``` # + # import the necessary packages import cv2 import dlib #Set up some webcam objects video_capture = cv2.VideoCapture(0) ROOT = "/home/jeff/Jupyter-Notebooks/DataSets/Models/" MODEL = "shape_predictor_68_face_landmarks.dat" # initialize dlib's face detector (HOG-based) and then create the facial landmark predictor detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(ROOT + MODEL) while True: ret, frame = video_capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) clahe_image = clahe.apply(gray) detections = detector(clahe_image, 1) # Detect the faces in the image for k,d in enumerate(detections): # For each detected face shape = predictor(clahe_image, d) # Get coordinates for i in range(1,68): # There are 68 landmark points on each face cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=2) # For each point, draw a red circle with thickness2 on the original frame cv2.imshow("image", frame) #Display the frame # exit program when the user presses 'q' or esc k = cv2.waitKey(1) & 0xFF if k == 27 or k == ord('q'): break # clean up video_capture.release() cv2.destroyAllWindows() # - # With the facial landmarks in hand, # you need to find ways to transform these dots overlaid on your face into features to feed the classifer. # How you extract features from your facial landmark source data is actually where a lot of the research is. # + import cv2 import glob import random import math import numpy as np import dlib import itertools from sklearn.svm import SVC ROOT = "/home/jeff/Jupyter-Notebooks/DataSets/Models/" MODEL = "shape_predictor_68_face_landmarks.dat" emotions = ["anger", "contempt", "disgust", "fear", "happiness", "neutral", "sadness", "surprise"] #Emotion list clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(ROOT + MODEL) #Or set this to whatever you named the downloaded file clf = SVC(kernel='linear', probability=True, tol=1e-3)#, verbose = True) #Set the classifier as a support vector machines with polynomial kernel data = {} #Make dictionary for all values #data['landmarks_vectorised'] = [] def get_files(emotion): #Define function to get file list, randomly shuffle it and split 80/20 files = glob.glob("dataset\\%s\\*" %emotion) random.shuffle(files) training = files[:int(len(files)*0.8)] #get first 80% of file list prediction = files[-int(len(files)*0.2):] #get last 20% of file list return training, prediction def get_landmarks(image): detections = detector(image, 1) for k,d in enumerate(detections): #For all detected face instances individually shape = predictor(image, d) #Draw Facial Landmarks with the predictor class xlist = [] ylist = [] for i in range(1,68): #Store X and Y coordinates in two lists xlist.append(float(shape.part(i).x)) ylist.append(float(shape.part(i).y)) xmean = np.mean(xlist) ymean = np.mean(ylist) xcentral = [(x-xmean) for x in xlist] ycentral = [(y-ymean) for y in ylist] landmarks_vectorised = [] for x, y, w, z in zip(xcentral, ycentral, xlist, ylist): landmarks_vectorised.append(w) landmarks_vectorised.append(z) meannp = np.asarray((ymean,xmean)) coornp = np.asarray((z,w)) dist = np.linalg.norm(coornp-meannp) landmarks_vectorised.append(dist) 
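            # in addition to the raw x/y coordinates, store each landmark's distance (above) and angle in degrees (below) relative to the mean point of the face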
landmarks_vectorised.append((math.atan2(y, x)*360)/(2*math.pi)) data['landmarks_vectorised'] = landmarks_vectorised if len(detections) < 1: data['landmarks_vestorised'] = "error" def make_sets(): training_data = [] training_labels = [] prediction_data = [] prediction_labels = [] for emotion in emotions: print(" working on %s" %emotion) training, prediction = get_files(emotion) #Append data to training and prediction list, and generate labels 0-7 for item in training: image = cv2.imread(item) #open image gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to grayscale clahe_image = clahe.apply(gray) get_landmarks(clahe_image) if data['landmarks_vectorised'] == "error": print("no face detected on this one") else: training_data.append(data['landmarks_vectorised']) #append image array to training data list training_labels.append(emotions.index(emotion)) for item in prediction: image = cv2.imread(item) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) clahe_image = clahe.apply(gray) get_landmarks(clahe_image) if data['landmarks_vectorised'] == "error": print("no face detected on this one") else: prediction_data.append(data['landmarks_vectorised']) prediction_labels.append(emotions.index(emotion)) return training_data, training_labels, prediction_data, prediction_labels accur_lin = [] for i in range(0,10): print("Making sets %s" %i) #Make sets by random sampling 80/20% training_data, training_labels, prediction_data, prediction_labels = make_sets() npar_train = np.array(training_data) #Turn the training set into a numpy array for the classifier npar_trainlabs = np.array(training_labels) print("training SVM linear %s" %i) #train SVM clf.fit(npar_train, training_labels) print("getting accuracies %s" %i) #Use score() function to get accuracy npar_pred = np.array(prediction_data) pred_lin = clf.score(npar_pred, prediction_labels) print("linear: ", pred_lin) accur_lin.append(pred_lin) #Store accuracy in a list print("Mean value lin svm: %s" %np.mean(accur_lin)) #FGet mean accuracy of the 10 runs # - # # Sources # * [Emotion Recognition using Facial Landmarks, Python, DLib and OpenCV](http://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/) # * [Real-time facial landmark detection with OpenCV, Python, and dlib](https://www.pyimagesearch.com/2017/04/17/real-time-facial-landmark-detection-opencv-python-dlib/) # * [20+ Emotion Recognition APIs That Will Leave You Impressed, and Concerned](https://nordicapis.com/20-emotion-recognition-apis-that-will-leave-you-impressed-and-concerned/) # * [Google's cloud video intelligence](https://cloud.google.com/video-intelligence/) # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Agenda #What is Mapping? 
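#   (mapping here means using Series.map to substitute each value in a column via a dictionary or function lookup)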
#Use Mapping in DataFrames # - import pandas as pd from pandas import DataFrame df= DataFrame({ 'country': ['USA', 'China', 'Japan', 'Germany', 'India'], 'GDP': [19,12,4,3,2] }) print(df) population_map= { 'USA' : 325, 'China': 1421, 'Japan': 127, 'Germany': 82, 'India': 1338 } print(population_map) df['Population']= df['country'].map(population_map) print(df) # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0 # language: julia # name: julia-0.5 # --- # # Setup # # Note: you may have to add/clone/checkout some of these packages # Notebook based on works: http://www.breloff.com # + # this re-exports Transformations, StochasticOptimization, Penalties, and ObjectiveFunctions using Learn using MLPlots # my version of ML iteration. Hopefully will be replaced with what's currently in MLDataUtils dev branch using StochasticOptimization.Iteration import MLDataUtils: rescale! # for loading the data import MNIST # for plotting using StatPlots, MLPlots #gr(leg=false, linealpha=0.5, legendfont=font(7), fmt=:png) gr(leg=false, linealpha=0.5) # - # # Helper functions # + # create a one-hot matrix given class labels # TODO: this should be added as a utility in MLDataUtils function to_one_hot(y::AbstractVector) yint = map(yi->round(Int,yi)+1, y) nclasses = maximum(yint) hot = zeros(Float64, nclasses, length(y)) for (i,yi) in enumerate(yint) hot[yi,i] = 1.0 end hot end # randomly pick a subset of testdata (size = totcount) and compute the total loss function my_test_loss(obj, testdata, totcount = 500) totloss = 0.0 totcorrect = 0 for (x,y) in each_obs(rand(each_obs(testdata), totcount)) totloss += transform!(obj,y,x) # logistic version: # ŷ = output_value(obj.transformation)[1] # correct = (ŷ > 0.5 && y > 0.5) || (ŷ <= 0.5 && y < 0.5) # softmax version: ŷ = output_value(obj.transformation) chosen_idx = indmax(ŷ) correct = y[chosen_idx] > 0 totcorrect += correct end totloss, totcorrect/totcount end # - # # Set up the dataset # + using MNIST # our data: x_train, y_train = MNIST.traindata() x_test, y_test = MNIST.testdata() # normalize the input data given μ/σ for the input training data # note: scale both train and test sets using the train data #μ, σ = rescale!(x_train) #rescale!(x_test, μ, σ) xmin, xmax = extrema(x_train) x_train .= 2 .* (x_train .- xmin) ./ (xmax - xmin) .- 1 x_test .= 2 .* (x_test .- xmin) ./ (xmax - xmin) .- 1 # convert y data to one-hot y_train, y_test = map(to_one_hot, (y_train, y_test)) # optional: limit to only 0/1 digits for easier training # to_isone(y::AbstractVector) = (z = Array(eltype(y), 1, length(y)); map!(yi->float(yi==1.0), z, y)) # y_train, y_test = map(to_isone, (y_train, y_test)) # train = filterobs(i -> y_train[i] < 1.5, x_train, y_train) # test = filterobs(i -> y_test[i] < 1.5, x_test, y_test) # store as tuples to make it easier train = (x_train, y_train) test = (x_test, y_test); # - # # Construct our model and objective function # + # Activation Functions => :logistic, :tanh, :softsign, :relu, :softplus, :sinusoid, :gaussian, :threshold, :sign nin, nh, nout = 784, [50,50], 10 # this is our gradient calculation method grad_calc = :backprop #grad_calc = :dfa # create a feedforward neural net with softplus activations and softmax output t = nnet(nin, nout, nh, :sign, :softmax, grad_calc=grad_calc) # create an objective function with L2 penalty and an implicit cross entropy loss layer #penalty = NoPenalty() obj = 
objective(t) # - function objective(chain::Chain, penalty::Penalty = NoPenalty()) T = typeof(chain[end]) loss = if T <: Affine || T <: Activation{:identity} L2DistLoss() elseif T <: Activation{:logistic} CrossentropyLoss() elseif T <: Activation{:softmax} CrossEntropy(output_length(chain[end])) else error("Can't pick a default loss for $T... choose it explicitly.") end objective(chain, loss, penalty) end # # optional: set up plotting # + # the parts of the plot chainplt = ChainPlot(t, maxn=10, tickfont=font(5)) lossplt = TracePlot(2, title="Loss", ylim=(0,Inf), leg=true, lab=["Train" "Test"]) accuracyplt = TracePlot(2, title="Accuracy", ylim=(0.4,1), leg=true, lab=["Train" "Test"]) hmplt = heatmap(rand(28,28), ratio=1, title="outgoing wgt") hmplt2 = heatmap(rand(28,28), ratio=1, title="input grad") # put together the full plot... a ChainPlot with loss, accuracy, and the heatmap plot( chainplt.plt, lossplt.plt, accuracyplt.plt, hmplt, hmplt2, size = (1200,800), layout=@layout([a{0.8h}; grid(1,2){0.75w} grid(1,2)]) ) anim = nothing #anim = Animation() # this is our custom callback which will be called on every 100 iterations # note: we do the plotting here. tracer = IterFunction((obj, i) -> begin # sample points from the test set and compute/save the loss @show i if mod1(i,500)==500 # training loss trainloss, trainaccuracy = my_test_loss(obj, train, 200) @show trainloss, trainaccuracy testloss, testaccuracy = my_test_loss(obj, test, 200) @show testloss, testaccuracy push!(lossplt, i, [trainloss, testloss]) push!(accuracyplt, i, [trainaccuracy, testaccuracy]) end # add transformation data update!(chainplt) # update the heatmap of the total outgoing weight from each pixel t1 = isa(t[1], InputNorm) ? t[2] : t[1] pixel_importance = reshape(sum(t1.params.views[1],1), 28, 28) hmplt[1][1][:z].surf[:] = pixel_importance pixel_importance = reshape(abs(input_grad(t)),28,28) # another possible metric hmplt2[1][1][:z].surf[:] = pixel_importance # handle animation frames/output anim == nothing || frame(anim) # update the plot display #gui() inline() end, every=100) # trace once before we start learning to see initial values tracer.f(obj, 0) # - # # Create a MetaLearner learner = make_learner( # averages the gradient over minibatches, updating params using the Adam method GradientLearner(1e-3, SGD(0.7)), # our custom iteration method tracer, # shorthand to add a MaxIter(10000) maxiter = 1500 ) # # Learn! # do the learning... 
average over minibatches of size 5 for maxiter iterations # learn!(obj, learner, infinite_batches(train, size=5)) learn!(obj, learner, infinite_obs(train)) :dfa # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.sys.path.append(os.path.dirname(os.path.abspath('.'))) # ## 数据准备 # + import numpy as np data = np.array([[2.5, 3.5, 3, 3.5, 2.5, 3], [3, 3.5, 1.5, 5, 3.5, 3], [2.5, 3, 0, 3.5, 0, 4], [0, 3.5, 3, 0, 4, 4], [3, 4, 2, 3, 2, 3], [3, 4, 0, 5, 3.5, 3], [0, 4.5, 0, 4, 1, 0]]) n_users, n_items = data.shape # - # 有了user-item数据后,可以计算两两user之间的相似度: # + from metrics.pairwise.euclidean_distances import euclidean_distances dist_mat=euclidean_distances(data) # 两两用户之间的距离矩阵 sim_mat=1/(1+dist_mat) # 将距离转化成相似度 # - # 指定一个用户$user_{i}$,首先找到跟其最相似的前$k$个用户: i = 6 # 最后一个用户 k = 3 # 使用最相似的前3个用户 top_k_sim = sim_mat[i][sim_mat[i] != 1].argsort( )[-1:-k-1:-1] # 首先排除相似度为1的用户,然后取前k个最相似的用户 # 推荐的本质就是为用户推荐其未曾见过或用过的东西,所以找出指定用户未评分的物品,然后计算相似用户对该物品的加权评分: # + cand_items_mask = (data[i] == 0) # 提取未评价物品的布尔索引 cand_items = np.arange(len(data[i]))[cand_items_mask] # 候选推荐物品的索引 # 相似用户对候选物品的评分矩阵,形状为(top_users,cand_items) scores = data[top_k_sim, :][:, cand_items_mask] # 对已评分用户相似度的求和,作为分母 denominator = np.sum( sim_mat[i, top_k_sim], axis=0) scores = np.sum( scores * sim_mat[i, top_k_sim].reshape(-1, 1), axis=0) # 以相似度加权并求和 scores = scores/denominator # 除以相似度的累加 idx = np.argsort(scores)[::-1] # 按分数排序后的索引 scores = scores[idx] cand_items = cand_items[idx] print(scores, cand_items) # - # 封装测试: # + def CF(data, i, k=5): ''' i: 用户idx k: 使用前k个最相似的用户 ''' dist_mat = euclidean_distances(data) # 两两row之间的距离矩阵 sim_mat = 1/(1+dist_mat) # 将距离转化成相似度 top_k_sim = sim_mat[i][sim_mat[i] != 1].argsort()[-1:-k-1:-1] cand_items_msak = (data[i] == 0) cand_items = np.arange(len(data[i]))[cand_items_msak] # 相似用户对候选物品的评分矩阵,形状为(top_users,cand_items) scores = data[top_k_sim, :][:, cand_items_msak] # 对已评分用户相似度的求和,作为分母 denominator = np.sum( sim_mat[i, top_k_sim], axis=0) scores = np.sum( scores * sim_mat[i, top_k_sim].reshape(-1, 1), axis=0) # 以相似度加权并求和 scores = scores/denominator # 除以相似度的累加 idx = np.argsort(scores)[::-1] # 按分数排序后的索引 scores = scores[idx] cand_items = cand_items[idx] return [(item, score) for item, score in zip(cand_items, scores)] CF(data, 6, 3) # - # 如果需要针对物品推荐用户,将data矩阵转置即可。 data_T = data.T CF(data_T, 2, 2) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- """ Purpose: Predict the birth weight with different variables of the parents. 
The steps are the following: 1) Importing and analysing the data 2) Missing values detection, Flagging and Imputation 3) Feature Engineering 4) Correlation Analysis 5) Linear model with the whole dataset 6) OLS/lasso/ARD Trials 7) Comparing the score of different regression models """ # ## Importing and Analysing the data # + import pandas as pd #data science essentials import matplotlib.pyplot as plt #essential graphical output import seaborn as sns #enhanced graphical output import statsmodels.formula.api as smf #mathematical essentials from sklearn.model_selection import train_test_split #training dataframe tools import numpy as np #regression modeling from scipy.stats import iqr # IQR for Outlier Detection from sklearn.linear_model import LinearRegression # Linear Regression #declaring variable of the file file = './birthweight_low.xlsx' #storing the excel that is going to be the dataframe into a variable birthweight = pd.read_excel (io = file, sheet_name = 0, header = 0) # setting pandas print options pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) #removing the columns that won't help us to predict the birthweight birthweight = birthweight.drop(columns = ['fmaps', 'omaps'], axis = 0) #printing the information of each column print(birthweight.info()) #printing the sum of the missing values #printing the 15 first rows birthweight.head(n=15) # - #Analysing the composision of each column birthweight.describe(include = 'number').round(decimals = 2) # ## Missing values detection, Flagging and Imputation #printing the quantity of missing value per feature print('*' * 50) print(f"""Sum of missing values: {birthweight.isnull().sum()}""") print('*' * 50) #for every col in birthweight df for col in birthweight: #analizing if each column has missing value if birthweight[col].isnull().sum() > 0: #creating a m column birthweight['m_' + col] = birthweight[col].isnull().astype(int) # + # Dropping NA values bw_dropped = birthweight.dropna() titles = ['Mother Education', 'Father Education', 'Prenatal Visits'] cols = ['meduc', 'feduc', 'npvis'] i = 0 for col in cols: # calculating the mean mean = bw_dropped[col].mean() # calculating the median median = bw_dropped[col].median() # calculating the histplot sns.histplot(bw_dropped[col], color='grey') # Mean Line plt.axvline(mean, color='r', linestyle='--') # Median Line plt.axvline(median, color='b', linestyle='-') #legend plt.legend({'Mean':mean, 'Median':median}) plt.title (titles[i]) i += 1 #showing the plot plt.show() # + # Filling all missing values with median #mother education fill_value = bw_dropped['meduc'].median() birthweight['meduc'].fillna (value = fill_value, inplace = True) #prenatal visits fill_value = bw_dropped['npvis'].median() birthweight['npvis'].fillna(value = fill_value, inplace = True) #father education fill_value = bw_dropped['feduc'].median() birthweight['feduc'].fillna(value = fill_value, inplace = True) birthweight.isnull().sum() # + #declaring the columns that are not going to be plotted noplot = ['m_meduc', 'm_npvis', 'm_feduc'] #making a loop of the df columns for col in birthweight: #evaluating that the column is not in notplot list if col not in noplot: #declaring the size of the plot fig, ax = plt.subplots (figsize = (12, 6)) #performing the histogram plot sns.histplot( data = birthweight, x = col, kde = True, color = 'grey' ) #tittle of the plot plt.title(label = "Distribution of Newborn per " + col) #X Lable of the plot plt.xlabel(xlabel = 
col.capitalize()) #Y Lable of the plot plt.ylabel(ylabel = "Count") plt.show() #showing the plot #showing the skewness of all the columns print(f"""skewness: {birthweight[col].skew().round(decimals = 2)}""") print('*' * 120) # - # Checking different quantiles for outlier detection bw_quantiles = birthweight.loc[:, :].quantile([0.05, 0.20, 0.40, 0.60, 0.80, 0.95, 1.00]) #displaying df quantiles bw_quantiles # ## Feature Engineering # + #making a list of the variables that is not going to be displayed data =['bwght', 'log_bwght'] log = ['npvis'] for col in birthweight: if col not in (data): fig, ax = plt.subplots(figsize =(16, 8)) # developing a boxplot sns_boxplot = sns.boxplot(x = col, y = 'bwght', data = birthweight) plt.axhline(y = 3500, color = "purple", linestyle = '--') # titles and labels plt.title(label = 'Relationship between ' + col.capitalize() + ' and Birth Weight') plt.xlabel(xlabel = col.capitalize()) plt.ylabel(ylabel = 'Birth Weight') plt.setp(sns_boxplot.get_xticklabels(), rotation=90) plt.show() for col in log: if birthweight[col].min() == 0 and birthweight[col].value_counts().count() == 2: break elif birthweight[col].min() == 0: birthweight['log_' + col] = np.log(birthweight[col] + 0.99) elif birthweight[col].min() > 0: birthweight['log_' + col] = np.log(birthweight[col]) else: print('there is an error') birthweight['log_' + 'bwght'] = np.log(birthweight['bwght']) # + birthweight['cigsadddrink'] = birthweight['drink'] + birthweight['cigs'] # making a iter row function to create distinct help columns for row, col in birthweight.iterrows(): #creating a range of mother age if birthweight.loc[row, 'mage'] < 31: birthweight.loc[row, 'h2_mage'] = 'first' elif birthweight.loc[row, 'mage'] < 41: birthweight.loc[row, 'h2_mage'] = 'second' elif birthweight.loc[row, 'mage'] < 52: birthweight.loc[row, 'h2_mage'] = 'third' elif birthweight.loc[row, 'mage'] > 51: birthweight.loc[row, 'h2_mage'] = 'fourth' else: print("error1") #creating a range of cigarrettes if birthweight.loc[row, 'cigs'] <= 1: birthweight.loc[row, 'h2_cig'] = 'first' elif birthweight.loc[row, 'cigs'] < 21: birthweight.loc[row, 'h2_cig'] = 'second' elif birthweight.loc[row, 'cigs'] >= 21: birthweight.loc[row, 'h2_cig'] = 'third' else: print("error2") #creating a range of drinks if birthweight.loc[row, 'drink'] < 9: birthweight.loc[row, 'h2_drink'] = 'first' elif birthweight.loc[row, 'drink'] < 11: birthweight.loc[row, 'h2_drink'] = 'second' elif birthweight.loc[row, 'drink'] > 10: birthweight.loc[row, 'h2_drink'] = 'third' else: print("error4") #creating a range of father education if birthweight.loc[row, 'feduc'] < 9: birthweight.loc[row, 'h2_feduc'] = 'first' elif birthweight.loc[row, 'feduc'] < 12: birthweight.loc[row, 'h2_feduc'] = 'second' elif birthweight.loc[row, 'feduc'] > 11: birthweight.loc[row, 'h2_feduc'] = 'third' else: print("error5") #creating a range of mother education if birthweight.loc[row, 'meduc'] < 15: birthweight.loc[row, 'h2_meduc'] = 0 elif birthweight.loc[row, 'meduc'] >= 15: birthweight.loc[row, 'h2_meduc'] = 1 else: print("error5") #creating a range of cigarrettes plus drinks if birthweight.loc[row, 'cigsadddrink'] < 18: birthweight.loc[row, 'h2_cigsadddrink'] = 'first' elif birthweight.loc[row, 'cigsadddrink'] > 17: birthweight.loc[row, 'h2_cigsadddrink'] = 'second' else: print("error6") #obtaining the dummy variables temp_birthweight = pd.get_dummies(columns = ['h2_cig', 'h2_mage','h2_drink', 'h2_feduc','h2_cigsadddrink'], data = birthweight ) birthweight = temp_birthweight #different 
trials #birthweight['mageplowweight'] = birthweight['mage'] * birthweight['low_weight'] #birthweight['cigsplowweight'] = birthweight['cigs'] * birthweight['low_weight'] #birthweight['drinksplowweight'] = birthweight['drink'] * birthweight['low_weight'] #birthweight['drinksaddcigplowweight'] = (birthweight['drink'] + birthweight['cigs'] )* birthweight['low_weight'] #birthweight['log_magesqr'] = birthweight['log_mage'] * birthweight['log_mage'] #birthweight['magesqr'] = birthweight['mage'] * birthweight['mage'] #birthweight['fagesqr'] = birthweight['fage'] * birthweight['fage'] #birthweight['magesqraddfagesqr'] = birthweight['fagesqr'] + birthweight['magesqr'] #birthweight['monpresqr'] = birthweight['monpre'] * birthweight['monpre'] #birthweight['monpre:npvis'] = birthweight['monpre'] * birthweight['npvis'] #birthweight['drink:mage'] = birthweight['drink'] * birthweight['mage'] #birthweight['mblck:mage'] = birthweight['mage'] * birthweight['mblck'] #birthweight['cigs:mage'] = birthweight['mage'] * birthweight['cigs'] #birthweight['drinksaddcigs']= birthweight['drink'] * birthweight['cigs'] #birthweight['cigsadddrinkspermage'] = (birthweight['drink'] + birthweight['cigs']) * birthweight['mage'] #birthweight['cigssqr'] = birthweight['cigs'] * birthweight['cigs'] #birthweight['drinksqr'] = birthweight['drink'] * birthweight['drink'] #birthweight['mageaddfage'] = birthweight['mage'] * birthweight['fage'] #birthweight['cig:mwhte'] = birthweight['mwhte'] * birthweight['cigs'] #birthweight['npvissqr'] = birthweight['npvis'] * birthweight['npvis'] #birthweight['meducadddfedc'] = birthweight['feduc'] + birthweight['meduc'] #conditions = [ (birthweight['mblck'] > 0) & (birthweight['fblck'] > 0), birthweight['mblck'] < 1 ] #choices = [ 1, 0 ] #birthweight["parentsblack"] = np.select(conditions, choices, default=0) # - # ## Correlation among the independent features versus birthweight # + # creating a (Pearson) correlation matrix df_corr = birthweight.corr(method = 'pearson').round(2) # printing (Pearson) correlations with SalePrice print(df_corr.loc['bwght'].sort_values(ascending = False)) # + # making a copy of birthweight birthweight_explanatory = birthweight.copy() # dropping bwght and log_bwght from the explanatory variable set birthweight_explanatory = birthweight_explanatory.drop(['bwght', 'log_bwght'], axis = 1) # formatting each explanatory variable for statsmodels for val in birthweight_explanatory: print(f"{val} +") # - # ## First Linear model with whole dataset # #### I'am removing the columns h2_mage_first and h_meduc because are generating a high p_value # + # building a full model # blueprinting a model type lm_full = smf.ols(formula = """bwght ~ h2_cig_first + drink + h2_cig_third + h2_mage_fourth + h2_drink_first + h2_feduc_third + h2_cigsadddrink_first """, data = birthweight) # telling Python to run the data through the blueprint results_full = lm_full.fit() # printing the results results_full.summary() # - # ## OLS/Lasso/ARD Trials # + #defining the logarithmic target log_birthweight_target = birthweight.loc[:,'log_bwght'] #defining the normal target normal_birthweight_target = birthweight.loc[:,'bwght'] #defining the whole dataset removing certaing columns birthweight_data = birthweight.drop(['bwght', 'log_bwght'], axis = 1) #making the split value of the whole dataset x_train_FULL, x_test_FULL, y_train_FULL, y_test_FULL = train_test_split( birthweight_data, # x-variables normal_birthweight_target, # y-variable test_size = 0.25, random_state = 219) # + import sklearn.linear_model # 
linear models # INSTANTIATING a model object lasso_model = sklearn.linear_model.Lasso(alpha = 1.0, normalize = True) # default magitud # FITTING to the training data lasso_fit = lasso_model.fit(x_train_FULL, y_train_FULL) # PREDICTING on new data lasso_pred = lasso_fit.predict(x_test_FULL) # SCORING the results print('Lasso Training Score :', lasso_model.score(x_train_FULL, y_train_FULL).round(4)) print('Lasso Testing Score :', lasso_model.score(x_test_FULL, y_test_FULL).round(4)) # saving scoring data for future use lasso_train_score = lasso_model.score(x_train_FULL, y_train_FULL).round(4) # using R-square lasso_test_score = lasso_model.score(x_test_FULL, y_test_FULL).round(4) # using R-square lasso_test_gap = abs(lasso_train_score - lasso_test_score).round(4) # displaying and saving the gap between training and testing print('Lasso Train-Test Gap :', abs(lasso_test_gap).round(4)) # + # zipping each feature name to its coefficient lasso_model_values = zip(birthweight_data.columns, lasso_fit.coef_.round(decimals = 2)) # setting up a placeholder list to store model features lasso_model_lst = [('intercept', lasso_fit.intercept_.round(decimals = 2))] # printing out each feature-coefficient pair one by one for val in lasso_model_values: lasso_model_lst.append(val) # checking the results for pair in lasso_model_lst: print(pair) # - #declaring the features for future work coef = ['h2_cig_first', 'drink', 'h2_cig_third', 'h2_mage_first', 'h2_mage_fourth', 'h2_drink_first', 'h2_feduc_third', 'h2_meduc', 'h2_cigsadddrink_first'] # ### Lasso Model # + #declaring the OLS dataframe with the previous features declared ols_data = birthweight_data.loc[:, coef] # OLS p-value x-dataset (normal Y) x_train_OLS, x_test_OLS, y_train_OLS, y_test_OLS = train_test_split( ols_data, # x-variables normal_birthweight_target, # y-variable test_size = 0.25, random_state = 219) # INSTANTIATING a model object lasso_model = sklearn.linear_model.Lasso(alpha = 1.0, normalize = True) # default magitud # FITTING to the training data lasso_fit = lasso_model.fit(x_train_OLS, y_train_OLS) # PREDICTING on new data lasso_pred = lasso_fit.predict(x_test_OLS) # SCORING the results print('Lasso Training Score :', lasso_model.score(x_train_OLS, y_train_OLS).round(4)) print('Lasso Testing Score :', lasso_model.score(x_test_OLS, y_test_OLS).round(4)) # saving scoring data for future use lasso_train_score = lasso_model.score(x_train_OLS, y_train_OLS).round(4) # using R-square lasso_test_score = lasso_model.score(x_test_OLS, y_test_OLS).round(4) # using R-square lasso_test_gap = abs(lasso_train_score - lasso_test_score).round(4) # displaying and saving the gap between training and testing print('Lasso Train-Test Gap :', abs(lasso_test_gap).round(4)) # + # zipping each feature name to its coefficient lasso_model_values = zip(ols_data.columns, lasso_fit.coef_.round(decimals = 2)) # setting up a placeholder list to store model features lasso_model_lst = [('intercept', lasso_fit.intercept_.round(decimals = 2))] # printing out each feature-coefficient pair one by one for val in lasso_model_values: lasso_model_lst.append(val) # checking the results for pair in lasso_model_lst: print(pair) # - # ### OLS Model # + # INSTANTIATING a model object lr = LinearRegression() # FITTING to the training data lr_fit = lr.fit(x_train_OLS, y_train_OLS) # PREDICTING on new data lr_pred = lr_fit.predict(x_test_OLS) # SCORING the results print('OLS Training Score :', lr.score(x_train_OLS, y_train_OLS).round(4)) # using R-square print('OLS Testing Score :', 
lr.score(x_test_OLS, y_test_OLS).round(4)) # using R-square lr_train_score = lr.score(x_train_OLS, y_train_OLS) lr_test_score = lr.score(x_test_OLS, y_test_OLS) # displaying and saving the gap between training and testing print('OLS Train-Test Gap :', abs(lr_train_score - lr_test_score).round(4)) lr_test_gap = abs(lr_train_score - lr_test_score).round(4) # + # zipping each feature name to its coefficient lr_model_values = zip(ols_data[coef].columns, lr_fit.coef_.round(decimals = 2)) # setting up a placeholder list to store model features lr_model_lst = [('intercept', lr_fit.intercept_.round(decimals = 2))] # printing out each feature-coefficient pair one by one for val in lr_model_values: lr_model_lst.append(val) # checking the results for pair in lr_model_lst: print(pair) # - # ### ARD Model # + # INSTANTIATING a model object ard_model = sklearn.linear_model.ARDRegression() # FITTING the training data ard_fit = ard_model.fit(x_train_OLS, y_train_OLS) # PREDICTING on new data ard_pred = ard_fit.predict(x_test_OLS) #ard_pred = ard_fit.fit(x_test_FULL) # saving scoring data for future use ard_train_score = ard_model.score(x_train_OLS, y_train_OLS) ard_test_score = ard_model.score(x_test_OLS, y_test_OLS) print('Training Score:', ard_train_score) print('Testing Score :', ard_test_score) # displaying and saving the gap between training and testing print('ARD Train-Test Gap :', abs(ard_train_score - ard_test_score).round(4)) ard_test_gap = abs(ard_train_score - ard_test_score).round(4) # + # zipping each feature name to its coefficient ard_model_values = zip(ols_data.columns, ard_fit.coef_.round(decimals = 5)) # setting up a placeholder list to store model features ard_model_lst = [('intercept', ard_fit.intercept_.round(decimals = 2))] # printing out each feature-coefficient pair one by one for val in ard_model_values: ard_model_lst.append(val) # checking the results for pair in ard_model_lst: print(pair) # - # ### KNN Regression coef = ['h2_cig_first', 'drink', 'h2_cig_third', 'h2_mage_first', 'h2_mage_fourth', 'h2_drink_first', 'h2_feduc_third', 'h2_meduc', 'h2_cigsadddrink_first'] # + from sklearn.neighbors import KNeighborsRegressor # KNN for Regression from sklearn.preprocessing import StandardScaler # standard scaler # INSTANTIATING a StandardScaler() object #declaring the OLS dataframe with the previous features declared birthweight_data = birthweight.drop(['bwght', 'log_bwght'], axis =1) normal_birthweight_target = birthweight.loc[:,'bwght'] ols_data = birthweight_data.loc[:, coef] scaler = StandardScaler() # FITTING the scaler with ols_data scaler.fit(ols_data) #y variable is already standarized # TRANSFORMING our data after fit x_scaled = scaler.transform(ols_data) # converting scaled data into a DataFrame x_scaled_df = pd.DataFrame(x_scaled) # checking the results x_scaled_df.describe().round(2) # + x_train, x_test, y_train, y_test = train_test_split( x_scaled_df, normal_birthweight_target, test_size = 0.25, random_state = 219) # INSTANTIATING a KNN model object knn_reg = KNeighborsRegressor(algorithm = 'auto', n_neighbors = 10) # FITTING to the training data knn_fit = knn_reg.fit(x_train, y_train) # PREDICTING on new data knn_reg_pred = knn_fit.predict(x_test) # SCORING the results print('KNN Training Score:', knn_reg.score(x_train, y_train).round(4)) print('KNN Testing Score :', knn_reg.score(x_test, y_test).round(4)) # saving scoring data for future use knn_reg_score_train = knn_reg.score(x_train, y_train).round(4) knn_reg_score_test = knn_reg.score(x_test, y_test).round(4) # 
displaying and saving the gap between training and testing print('KNN Train-Test Gap:', abs(knn_reg_score_train - knn_reg_score_test).round(4)) knn_reg_test_gap = abs(knn_reg_score_train - knn_reg_score_test).round(4) # + # creating lists for training set accuracy and test set accuracy training_accuracy = [] test_accuracy = [] # building a visualization of 1 to 50 neighbors neighbors_settings = range(1, 51) for n_neighbors in neighbors_settings: # Building the model clf = KNeighborsRegressor(n_neighbors = n_neighbors) clf.fit(x_train, y_train) # Recording the training set accuracy training_accuracy.append(clf.score(x_train, y_train)) # Recording the generalization accuracy test_accuracy.append(clf.score(x_test, y_test)) # plotting the visualization fig, ax = plt.subplots(figsize=(12,8)) plt.plot(neighbors_settings, training_accuracy, label = "training accuracy") plt.plot(neighbors_settings, test_accuracy, label = "test accuracy") plt.ylabel("Accuracy") plt.xlabel("n_neighbors") plt.legend() plt.show() # - # finding the optimal number of neighbors opt_neighbors = test_accuracy.index(max(test_accuracy)) + 1 print(f"""The optimal number of neighbors is {opt_neighbors}""") # ## Comparing the score of different regression models # + # comparing results print(f""" Model Train Score Test Score GAP Score ----- ----------- ---------- ---------- OLS {round(lr_train_score,3)} {round(lr_test_score,3)} {round(lr_test_gap,3)} Lasso (Final Model) {round(lasso_train_score,3)} {round(lasso_test_score,3)} {round(lasso_test_gap,3)} ARD {round(ard_train_score,3)} {round(ard_test_score,3)} {round(ard_test_gap,3)} KNN Regression {round(knn_reg_score_train,3)} {round(knn_reg_score_test,3)} {round(knn_reg_test_gap,3)} """) # creating a dictionary for model results model_performance = { 'Model Type' : ['OLS', 'Lasso', 'ARD'], 'Training' : [lr_train_score, lasso_train_score, ard_train_score], 'Testing' : [lr_test_score, lasso_test_score, ard_test_score], 'Train-Test Gap' : [lr_test_gap, lasso_test_gap, ard_test_gap], 'Model Size' : [len(lr_model_lst), len(lasso_model_lst), len(ard_model_lst)], 'Model' : [lr_model_lst, lasso_model_lst, ard_model_lst]} # - print(""" Features: h2_cig_first = Number of cigarettes less or equal than 1 drink = quantity of drinks h2_cig_third = Number of cigarettes more than 21 h2_mage_first = Mother age less than 31 years old h2_mage_fourth = Mother age more than 51 years old h2_drink_first = Number of drinks less than 9 cigarettes h2_feduc_third = years of father education more than 11 h2_meduc = Years of mother education more than 15 h2_cigsadddrink_first = Cigarettes + drinks less than 18""") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # ## 迷宫路线求解 # # + pycharm={"name": "#%% \u4ee3\u7801\u5b9e\u73b0\n", "is_executing": false} import turtle PART_OF_PATH = 'O' #部分路径 TRIED = '.' 
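# Before the turtle-based Maze class defined below, here is a minimal, turtle-free
# sketch of the same recursive exploration idea, on a small hypothetical
# list-of-lists maze ('+' marks walls, 'S' the start, '.' a tried cell, and any
# reachable border cell counts as an exit):

def solve(grid, row, col):
    # a wall or an already-tried cell ends this branch
    if grid[row][col] in ('+', '.'):
        return False
    # reaching the border of the grid means an exit was found
    if row in (0, len(grid) - 1) or col in (0, len(grid[0]) - 1):
        return True
    grid[row][col] = '.'   # mark as tried before recursing
    return (solve(grid, row - 1, col) or solve(grid, row + 1, col) or
            solve(grid, row, col - 1) or solve(grid, row, col + 1))

demo = [list(r) for r in ["+++++",
                          "+ S +",
                          "+ + +",
                          "+   +",
                          "++ ++"]]
print(solve(demo, 1, 2))   # True: the gap in the bottom wall is reachable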
#尝试 OBSTACLE = '+' #障碍 DEAD_END = '-' #死胡同 # 迷宫类 class Maze(object): # 读取迷宫数据,初始化迷宫内部,并找到海龟初始位置。 def __init__(self, maze_file_name): rows_in_maze = 0 #初始化迷宫行数 columns_in_maze = 0 #初始化迷宫列数 self.maze_list = [] #初始化迷宫列表 maze_path = open(maze_file_name, 'r') #读取迷宫文件 for line in maze_path: #按行读取 row_list = [] #初始化行列表 col = 0 #初始化列 for ch in line.replace('\n',''): #按列读取 row_list.append(ch) #添加到行列表 if ch == 'S': #S为乌龟初始位置,即迷宫起点 self.start_row = rows_in_maze #乌龟初始行 self.start_col = col #乌龟初始列 col = col + 1 #下一列 rows_in_maze = rows_in_maze + 1 #下一行 self.maze_list.append(row_list) #行列表添加到迷宫列表 print(len(row_list)) columns_in_maze = len(row_list) #获取迷宫总列 self.rows_in_maze = rows_in_maze #设置迷宫总行数 self.columns_in_maze = columns_in_maze #设置迷宫总列数 self.x_translate = -columns_in_maze//2 #设置迷宫左上角的初始x坐标 self.y_translate = rows_in_maze//2 #设置迷宫左上角的初始y坐标 self.t = turtle.Turtle() #创建一个海龟对象 self.t.shape('turtle') #给当前指示点设置样式(类似鼠标箭头),海龟形状为参数指定的形状名,指定的形状名应存在于TurtleScreen的shape字典中。多边形的形状初始时有以下几种:"arrow", "turtle", "circle", "square", "triangle", "classic"。 self.wn = turtle.Screen() #创建一个能在里面作图的窗口 self.wn.setworldcoordinates(-columns_in_maze//2, -rows_in_maze//2, columns_in_maze//2, rows_in_maze//2) #设置世界坐标系,原点在迷宫正中心。参数依次为画布左下角x轴坐标、左下角y轴坐标、右上角x轴坐标、右上角y轴坐标 # 在屏幕上绘制迷宫 def draw_maze(self): self.t.speed(20) #绘图速度 for y in range(self.rows_in_maze): #按单元格依次循环迷宫 for x in range(self.columns_in_maze): if self.maze_list[y][x] == OBSTACLE: #如果迷宫列表的该位置为障碍物,则画方块 self.draw_centered_box(x + self.x_translate, -y + self.y_translate) # 画方块 def draw_centered_box(self, x, y, color="White"): self.t.up() #画笔抬起 self.t.goto(x - 0.5, y - 0.5) #前往参数位置,此处0.5偏移量的作用是使乌龟的探索路线在单元格的正中心位置 self.t.color(color) #方块边框为橙色 self.t.fillcolor("slategray") #方块内填充色 self.t.setheading(90) #设置海龟的朝向,标准模式:0 - 东,90 - 北,180 - 西,270 - 南。logo模式:0 - 北,90 - 东,180 - 南,270 - 西。 self.t.down() #画笔落下 self.t.begin_fill() #开始填充 for i in range(4): #画方块边框 self.t.forward(1) #前进1个单位 self.t.right(90) #右转90度 self.t.end_fill() #结束填充 # 移动海龟 def move_turtle(self, x, y): self.t.up() #画笔抬起 #setheading()设置海龟朝向,towards()从海龟位置到由(x, y),矢量或另一海龟位置连线的夹角。此数值依赖于海龟初始朝向, # 由"standard"、"world"或"logo" 模式设置所决定。 self.t.setheading(self.t.towards(x + self.x_translate, -y + self.y_translate)) self.t.goto(x + self.x_translate, -y + self.y_translate) #前往目标位置 # 画路径圆点 def drop_bread_crumb(self, color): self.t.dot(color); #dot(size=None, color)画路径圆点 # 用以更新迷宫内的状态及在窗口中改变海龟位置,行列参数为乌龟的初始坐标。 def update_position(self, row, col, val): self.maze_list[row][col] = val #设置该标记状态为当前单元格的值 self.move_turtle(col, row) #移动海龟 if val == PART_OF_PATH: #其中一条成功路径的圆点的颜色 color = 'lime' elif val == TRIED: #尝试用的圆点的颜色 color = 'black' elif val == DEAD_END: #死胡同用的圆点的颜色 color = 'red' self.drop_bread_crumb(color) #画路径圆点并上色 # 用以判断当前位置是否为出口。 def is_exit(self, row, col): return (row == 0 or row == self.rows_in_maze - 1 or col == 0 or col == self.columns_in_maze - 1) #根据海龟位置是否在迷宫的4个边线位置判断 # 返回键对应的值,影响searchFrom()中maze[startRow][startColumn]值的获取 def __getitem__(self, key): return self.maze_list[key] # 探索迷宫,注意此函数包括三个参数:一个迷宫对象、起始行、起始列。 def search_from(maze, start_row, start_column): # 从初始位置开始尝试四个方向,直到找到出路。 # 1. 遇到障碍 if maze[start_row][start_column] == OBSTACLE: return False # 2. 发现已经探索过的路径或死胡同 if maze[start_row][start_column] == TRIED or maze[start_row][start_column]== DEAD_END: return False # 3. 发现出口 if maze.is_exit(start_row, start_column): maze.update_position(start_row, start_column, PART_OF_PATH)#显示出口位置,注释则不显示此点 return True maze.update_position(start_row, start_column, TRIED)#更新迷宫状态、设置海龟初始位置并开始尝试 # 4. 
依次尝试每个方向 found = search_from(maze, start_row - 1, start_column) or \ search_from(maze, start_row + 1, start_column) or \ search_from(maze, start_row, start_column - 1) or \ search_from(maze, start_row, start_column + 1) if found: #找到出口 maze.update_position(start_row, start_column, PART_OF_PATH)#返回其中一条正确路径 else: #4个方向均是死胡同 maze.update_position(start_row, start_column, DEAD_END) return found # + pycharm={"name": "#%% \u6d4b\u8bd5\u6570\u636e\n", "is_executing": false} if __name__ == '__main__': maze = Maze('maze.txt') #实例化迷宫类,maze文件是使用“+”字符作为墙壁围出空心正方形空间,并用字母“S”来表示起始位置的迷宫文本文件。 maze.draw_maze() #在屏幕上绘制迷宫。 search_from(maze, maze.start_row, maze.start_col) #探索迷宫 # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Imports # + import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import torchvision from torchvision import * from torch.utils.data import Dataset, DataLoader from torch.utils.data.sampler import SubsetRandomSampler from torch.utils.tensorboard import SummaryWriter import matplotlib.pyplot as plt import time import copy import os # - # ### Select CUDA Device device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device batch_size = 128 learning_rate = 1e-3 # ### Loading the Data # + train_dataset = datasets.ImageFolder(root='../NSSI-Train', transform=transforms) test_dataset = datasets.ImageFolder(root='../NSSI-Test', transform=transforms) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True) # + data_transforms = { 'train': transforms.Compose([ transforms.Resize((224,224)), transforms.RandomHorizontalFlip(), transforms.ToTensor() ]), 'validation': transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor() ]), } image_datasets = { 'train': datasets.ImageFolder('../NSSI-Train', data_transforms['train']), 'validation': datasets.ImageFolder('../NSSI-Test', data_transforms['validation']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=32, shuffle=True), 'validation': torch.utils.data.DataLoader(image_datasets['validation'], batch_size=32, shuffle=False) } # - # ### ResNet-50 Model # + net = models.resnet50(pretrained=True) if torch.cuda.is_available(): net.cuda() for param in net.parameters(): param.requires_grad = False net.fc = nn.Sequential( nn.Linear(2048, 128), nn.ReLU(inplace=True), nn.Linear(128, 2)).to(device) # + criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9) def accuracy(out, labels): _,pred = torch.max(out, dim=1) return torch.sum(pred==labels).item() # + training_accuracies = [] training_losses = [] def train_model(model, criterion, optimizer, num_epochs=3): for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch+1, num_epochs)) print('-' * 10) for phase in ['train', 'validation']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) loss = criterion(outputs, labels) if phase == 'train': optimizer.zero_grad() loss.backward() optimizer.step() _, preds = torch.max(outputs, 1) running_loss += loss.item() * inputs.size(0) running_corrects += 
torch.sum(preds == labels.data) epoch_loss = running_loss / len(image_datasets[phase]) epoch_acc = running_corrects.double() / len(image_datasets[phase]) print('{} loss: {:.4f}, acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) training_accuracies.append(epoch_acc) training_losses.append(epoch_loss) return model # - model_trained = train_model(net, criterion, optimizer, num_epochs=8) # ### Saving Model Weights and Visualizing # + torch.save(model_trained.state_dict(), './models/weights_ceLoss_epoch8.h5') plt.plot([i.cpu().numpy() for i in training_accuracies[::2]],'-o') plt.plot(training_losses[::2],'-o') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(['Accuracy','Loss']) plt.title('Training Accuracy and Loss') plt.show() # - plt.plot([i.cpu().numpy() for i in training_accuracies[1::2]],'-o') plt.plot(training_losses[1::2],'-o') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(['Accuracy','Loss']) plt.title('Validation Accuracy and Loss') plt.show() # ### Loading saved weights to memory net = models.resnet50(pretrained=False).to(device) net.fc = nn.Sequential(nn.Linear(2048, 128), nn.ReLU(inplace=True), nn.Linear(128, 2)).to(device) net.load_state_dict(torch.load('./models/weights_ceLoss_epoch8.h5')) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 7, "status": "ok", "timestamp": 1629896903423, "user": {"displayName": "curoky", "photoUrl": "", "userId": "15112146758138066479"}, "user_tz": -480} id="wCF0yv91a-tR" outputId="905d5620-1a4a-48f4-bd19-9d1e62aa4f1b" import glob glob.glob('/usr/bin/z*') # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:DESI] # language: python # name: conda-env-DESI-py # --- # # DESI Commissioning Weather Analysis # %pylab inline import datetime import pandas as pd import desimodel.weather # ## Dome Closed Fraction # Import [this spreadsheet](https://docs.google.com/spreadsheets/d/19a4i_ffxD9saup1O3-DJFDhqbS675JBMIp1QT7NQVXg/edit?usp=sharing) exported as a csv file: actual = pd.read_csv('DomeClosedCI.csv') # + def plot_dome(df=actual): # Extract the actual weather history. actual_frac = df['Closed Frac'] nights = pd.to_datetime(df['NIGHT']) first, last = nights.iloc[0].date(), nights.iloc[-1].date() dt = np.array([(night.date() - first).days for night in nights]) last += datetime.timedelta(days=1) nnights = dt[-1] + 1 assert nnights == (last - first).days print(f'Actual mean is {100 * actual_frac.mean():.1f}%') # Calculate the predicted dome-closed fraction for the nights of the CI run. cirun = np.zeros(nnights, bool) cirun[dt] = True nyears = 11 model_frac = np.empty(nyears) years = range(2007, 2018) for i, year in enumerate(years): closed = desimodel.weather.dome_closed_fractions(first, last, replay='Y{}'.format(year)) model_frac[i] = np.mean(closed[cirun]) ilo, ihi = np.argmin(model_frac), np.argmax(model_frac) print(f'Model mean is {100 * model_frac.mean():.1f}%') print(f'Worst year is {years[ihi]} with {100 * model_frac[ihi]:.1f}%') print(f'Best year is {years[ilo]} with {100 * model_frac[ilo]:.1f}%') # Plot the actual and predicted dome-closed fraction. 
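# (In the figure built below, the left panel plots the nightly CI dome-closed
#  fractions against the band spanned by the best and worst 2007-17 replay years,
#  with the replay median and the CI-run mean drawn as horizontal lines; the right
#  panel histograms the eleven replay-year means and marks both the CI mean and
#  the model mean for comparison.)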
fig, ax = plt.subplots(1, 2, figsize=(10, 4)) ax[0].axhspan(model_frac[ilo], model_frac[ihi], color='b', alpha=0.2) ax[0].axhline(model_frac[ihi], label=f'Worst={years[ihi]}', ls=':', c='b') ax[0].axhline(np.median(model_frac), label='Median 2007-17', c='b') ax[0].axhline(model_frac[ilo], label=f'Best={years[ilo]}', ls=':', c='b') ax[0].scatter(dt, actual_frac, c='r', lw=0, label='CI Nightly') ax[0].set_xlim(0, nnights) ax[0].set_ylim(-0.01, 1.01) ax[0].set_xlabel('Nights since 20190401') ax[0].set_ylabel('Dome Closed Fraction') ax[0].axhline(actual_frac.mean(), c='r', ls='--', label='CI Mean') ax[0].legend(loc='center left', ncol=2) ax[1].axvline(actual_frac.mean(), c='r', ls='--', label='CI Mean') ax[1].hist(model_frac, bins=np.linspace(0, 0.3, 9), histtype='stepfilled', alpha=0.2, color='b', label='2007-17') ax[1].axvline(model_frac.mean(), c='b', ls='-', label='Model Mean') ax[1].set_yticks([]) ax[1].legend() ax[1].set_xlim(0., 0.3) ax[1].set_xlabel('Dome Closed Fraction') plt.tight_layout() plt.savefig('CIdome.pdf') plot_dome() # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + id="zOm7Mk0b0gjp" colab_type="code" colab={} import pandas as pd uri="https://raw.githubusercontent.com/alura-cursos/introducao-a-data-science/master/aula4.1/movies.csv" filmes=pd.read_csv(uri) filmes.columns=["filmeId","titulo","generos"] filmes=filmes.set_index('filmeId') filmes=filmes.join(filmes['generos'].str.get_dummies()).drop('generos',axis=1) filmes['ano']=filmes['titulo'].str.extract(r'.*\((\d+)\)') filmes=filmes.dropna() #filmes.head() # + id="mZWQ5mdF2q4e" colab_type="code" colab={} uri="https://raw.githubusercontent.com/alura-cursos/introducao-a-data-science/master/aula4.1/ratings.csv" notas=pd.read_csv(uri) notas.columns=["usuarioId","filmesId","nota","momento"] arredondadas=notas['nota'].round(1) #notas.head() # + id="Z9luumbh3TXi" colab_type="code" colab={} medias=notas.groupby('filmesId')['nota'].mean() filmes=filmes.join(medias).dropna().sort_values('nota',ascending=False).rename(columns={'nota': 'media'}) total=notas.groupby('filmesId')['momento'].count() filmes=filmes.join(total) filmes=filmes.rename(columns={'momento':'total'}) filmes=filmes.query("total > 50") filmes['media_categoria]']=(filmes['media']).round(1).sort_values random_filmes=filmes.sample(10) #random_filmes.head() # + id="l3cRISnY6uYv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="fdd13593-b474-4a27-9aa3-3f79c1bd7785" filmes.head() # + id="CPbth0vS_Ewk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 801} outputId="d1b53633-d52d-41ef-e14b-5cee3315b87b" random_filmes # + id="a4ttgb_f_q3t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="fbdb06cc-5e99-4e0f-9118-b4f6ee5224e5" notas['nota'].hist() # + id="8VxJRZBQABFl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="489edd1e-bf9f-4120-d025-f4345d591227" notas.hist() # + id="Y-yOjeY8AGQ2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="6ba49db1-50ce-4b0a-a969-423c13f5db25" notas['nota'].hist() # + id="mOiE8qxvAOlo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="38b3f016-0d04-4a78-91b4-560f25041077" 
arredondadas.value_counts().plot.pie() # + id="7WSvQGBDAOpF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="7dd45842-e847-4884-ae0f-7aafb2d0f2b6" import seaborn as sns # + id="ng_B23ZeAOsQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="bb1024d8-a1ff-4197-a593-96a18c17b06f" sns.countplot(arredondadas) # + id="gRmLq0bcAOwK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="59729b4b-4cb7-4694-be0f-09990ba2bf0e" import matplotlib.pyplot as plt sns.countplot(arredondadas) plt.title("Distribuição das notas"); # + id="Ya22PCQGR-Q_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="f5a4aed5-cc06-4082-98d3-be3982f62183" palette=sns.color_palette("Blues",10) sns.countplot(arredondadas, palette=palette) # + id="PmUo__NpR-ZO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="2d3fea18-bcd8-4802-d5e7-2b468d14335d" sns.distplot(filmes['media']) # + id="mzCn4-CRR-lH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 433} outputId="c8ab0229-2356-4412-a7ed-686faf81b186" p=sns.barplot(data=random_filmes, x="titulo",y="media") p.set_xticklabels(p.get_xticklabels(), rotation=45, horizontalalignment="right") plt.title("Notas médias de 10 filmes") plt.show() # + id="J9QDZu1mR-qf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="f2d2eb43-e71c-4e55-de82-8729c9234323" sns.catplot(data=filmes, x="Action", y="media") # + id="fS3PoVktAO3b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="864f2ca7-a445-40d1-98a5-b5480aacdef8" sns.distplot(filmes.query("Action==1")['media']) sns.distplot(filmes.query("Action==0")['media']) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Define fxn to calculate regress and correl coeff maps # - Calculate correl and regress coeff map btwn two (time,lat,lon) xr dataarrays def gettemprcmap_loop(x3dnow,y3dnow,namebasenow): # - Initialize nan DataArray same size as x3dnow # --> new DataArray takes name of old though, so will need to rename # (Apparently you can't initialize empty or full DataArrays yet still: https://github.com/pydata/xarray/issues/277) rc2dnow = x3dnow.mean(dim='time') * np.nan int2dnow = x3dnow.mean(dim='time') * np.nan rcpval2dnow = x3dnow.mean(dim='time') * np.nan cc2dnow = x3dnow.mean(dim='time') * np.nan ccpval2dnow = x3dnow.mean(dim='time') * np.nan stderr2dnow = x3dnow.mean(dim='time') * np.nan # - Loop over each lat/lon combo for ilat in range(x3dnow.lat.size): for ilon in range(x3dnow.lon.size): xnow = x3dnow[:,ilat,ilon] ynow = y3dnow[:,ilat,ilon] # - Get rid of any times where xnow or ynow is nan or inf valsnow = (~xr.ufuncs.isnan(xnow))&(~xr.ufuncs.isnan(ynow))&(~xr.ufuncs.isinf(xnow))&(~xr.ufuncs.isinf(ynow)) if valsnow.sum()>=2: #print([ilat,ilon]) ccnow = stats.pearsonr(xnow[valsnow],ynow[valsnow]) cc2dnow[ilat,ilon] = ccnow[0] ccpval2dnow[ilat,ilon] = ccnow[1] #[rc2dnow[ilat,ilon], int2dnow[ilat,ilon], cc2dnow[ilat,ilon], pval2dnow[ilat,ilon], # stderr2dnow[ilat,ilon]] = stats.linregress(xnow[valsnow],ynow[valsnow]) linregnow = stats.linregress(xnow[valsnow],ynow[valsnow]) rc2dnow[ilat,ilon] = linregnow[0] int2dnow[ilat,ilon] = linregnow[1] rcpval2dnow[ilat,ilon] = linregnow[3] 
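# (stats.linregress returns (slope, intercept, rvalue, pvalue, stderr); index 2,
#  the correlation coefficient, is not stored here since the Pearson r and its
#  p-value were already captured above with stats.pearsonr)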
stderr2dnow[ilat,ilon] = linregnow[4] # - Rename rc2dnow.name = namebasenow+'_rc' int2dnow.name = namebasenow+'_int' rcpval2dnow.name = namebasenow+'_rc_pval' cc2dnow.name = namebasenow+'_cc' ccpval2dnow.name = namebasenow+'_cc_pval' stderr2dnow.name = namebasenow+'_stderr' return [rc2dnow,int2dnow,rcpval2dnow,cc2dnow,ccpval2dnow,stderr2dnow] # ### Define fxn to generate Wilcoxon ranksum p-value maps # - Calculate wilcoxon rank sum p-val map btwn two (time,lat,lon) xr dataarrays def wrspvalmap_loop(x3dnow,y3dnow,namebasenow): # - Initialize nan DataArray same size as x3dnow # --> new DataArray takes name of old though, so will need to rename pval2dnow = xr.full_like(x3dnow.mean(dim='time'), np.nan) # - Loop over each lat/lon combo for ilat in range(x3dnow.lat.size): for ilon in range(x3dnow.lon.size): xnow = x3dnow[:,ilat,ilon] ynow = y3dnow[:,ilat,ilon] # - Get rid of any times where xnow or ynow is nan or inf xvalsnow = (~xr.ufuncs.isnan(xnow))&(~xr.ufuncs.isinf(xnow)) yvalsnow = (~xr.ufuncs.isnan(ynow))&(~xr.ufuncs.isinf(ynow)) if (xvalsnow.sum()>=1)&(yvalsnow.sum()>=1): #print([ilat,ilon]) wrsnow = stats.ranksums(xnow[xvalsnow],ynow[yvalsnow]) pval2dnow[ilat,ilon] = wrsnow[1] # - Rename pval2dnow.name = namebasenow+'_wrs_pval' return pval2dnow # ### Define fxn to generate Kruskal-Wallis p-value maps (compares seasons) # - Calculate kruskal-wallis p-val map btwn four (time,lat,lon) xr dataarrays def kwpvalmap_loop(var1_3dnow,var2_3dnow,var3_3dnow,var4_3dnow,namebasenow): # - Initialize nan DataArray same size as var1_3dnow # --> new DataArray takes name of old though, so will need to rename pval2dnow = xr.full_like(var1_3dnow.mean(dim='time'), np.nan) # - Loop over each lat/lon combo for ilat in range(var1_3dnow.lat.size): for ilon in range(var1_3dnow.lon.size): x1now = var1_3dnow[:,ilat,ilon] x2now = var2_3dnow[:,ilat,ilon] x3now = var3_3dnow[:,ilat,ilon] x4now = var4_3dnow[:,ilat,ilon] # - Get rid of any times where xnow is nan or inf x1valsnow = (~xr.ufuncs.isnan(x1now))&(~xr.ufuncs.isinf(x1now)) x2valsnow = (~xr.ufuncs.isnan(x2now))&(~xr.ufuncs.isinf(x2now)) x3valsnow = (~xr.ufuncs.isnan(x3now))&(~xr.ufuncs.isinf(x3now)) x4valsnow = (~xr.ufuncs.isnan(x4now))&(~xr.ufuncs.isinf(x4now)) if (x1now[x1valsnow].sum()>=1)&(x2now[x2valsnow].sum()>=1)& \ (x3now[x3valsnow].sum()>=1)&(x4now[x4valsnow].sum()>=1): #print([ilat,ilon]) kwnow = stats.kruskal(x1now[x1valsnow],x2now[x2valsnow], x3now[x3valsnow],x4now[x4valsnow]) pval2dnow[ilat,ilon] = kwnow[1] # - Rename pval2dnow.name = namebasenow+'_kw_pval' return pval2dnow # ### Define fxn to control false discovery rate in 2D maps # - Calculate minimum significant p-val threshold from false discovery rate (fdr) def controlfdr2d(pval2dnow,alphafdr): pvalstack=pval2dnow.stack(x=['lat','lon']) pvalstack=pvalstack[pvalstack.notnull()] sortedpvalstack = pvalstack.sortby(pvalstack).values N = sortedpvalstack.size pfdrarr = alphafdr*np.arange(1,N+1)/N if np.sum((sortedpvalstack-pfdrarr)<=0)>0: pthreshfdr = sortedpvalstack[(pfdrarr-sortedpvalstack)>=0].max() else: pthreshfdr = 0 return pthreshfdr # ### Define fxn to find spots where p-value is below certain value # - Find lat and lon where all p-values are small def find_where_pval_small(pvalmap,alpha): # - Find spatial pts where all p-val are smaller than alpha pvalmap_small_nonnan = pvalmap.where(pvalmap1 and len(lnnow)>1: _,enlnpvalnow[ieez] = stats.ranksums(ennow,lnnow) varallnow.append(allnow) varennow.append(ennow) varlnnow.append(lnnow) return varallnow, varennow, varlnnow, enlnpvalnow def 
get_seas_eezmask_seaskwpval(varnow,eeznames,eezmask,dfeeznamesmask): varwinnow, varsprnow, varsumnow, varautnow = [], [], [], [] seaspvalnow = np.full(len(eeznames), np.nan) for ieez in range(0,len(eeznames)): eeznumnow = dfeeznamesmask['numbers'][dfeeznamesmask['names']==eeznames[ieez]].values winnow = varnow.sel(time=varnow['time.season']=='DJF' ).where(np.isin(eezmask,eeznumnow)).values winnow = winnow[~np.isnan(winnow)] sprnow = varnow.sel(time=varnow['time.season']=='MAM' ).where(np.isin(eezmask,eeznumnow)).values sprnow = sprnow[~np.isnan(sprnow)] sumnow = varnow.sel(time=varnow['time.season']=='JJA' ).where(np.isin(eezmask,eeznumnow)).values sumnow = sumnow[~np.isnan(sumnow)] autnow = varnow.sel(time=varnow['time.season']=='SON' ).where(np.isin(eezmask,eeznumnow)).values autnow = autnow[~np.isnan(autnow)] if len(winnow)>1 and len(sprnow)>1 and len(sumnow)>1 and len(autnow)>1: _,seaspvalnow[ieez] = stats.kruskal(winnow, sprnow, sumnow, autnow) varwinnow.append(winnow) varsprnow.append(sprnow) varsumnow.append(sumnow) varautnow.append(autnow) return varwinnow, varsprnow, varsumnow, varautnow, seaspvalnow def set_box_color(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['medians'], color=color) # ### Define fxn to control false discovery rate in 1D array def controlfdr1d(pvals1d,alphafdr): pvals1d = pvals1d[~np.isnan(pvals1d)] sortedpvals = np.sort(pvals1d) N = sortedpvals.size pfdrarr = alphafdr*np.arange(1,N+1)/N if np.sum((sortedpvals-pfdrarr)<=0)>0: pthreshfdr = sortedpvals[(sortedpvals-pfdrarr)<=0].max() else: pthreshfdr = 0 return pthreshfdr # ### Define fxn for calculating pO2 # + # function from 's github: # https://github.com/kallisons/pO2_conversion/function_pO2.py # with some modifications for style and units # pO2 Conversion: # Oxygen concentration is converted to percent oxygen saturation using the equations from Garcia and Gordon (1992). # The percent oxygen saturation is divided by 0.21 (the fractional atmospheric concentration of oxygen) to get pO2 in atmospheres (atm). # pO2 is then corrected for the hydrostatic pressure at depth (Enns et al., 1965). # The units for pO2 are converted to kilopascals (kPa), the SI Units for pressure. # References: # - , (1992) Oxygen solubility in seawater: Better fitting equations. Limnology and Oceanography, 37, 1307–1312. # - Enns T, Scholander PF, Bradstreet ED (1965) Effect of hydrostatic pressure on gases dissolved in water. The Journal of Physical Chemistry, 69, 389–391. 
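# Illustrative usage of the conversion defined below (hypothetical water-column
# values, shown only to make the expected argument units concrete):
#
#     po2_kpa = calc_po2(o2=200.0, temp=10.0, sal=35.0, depth=500.0)
#
# i.e. 200 umol/kg dissolved O2 at 10 degC potential temperature, 35 psu salinity
# and 500 m depth, returning an O2 partial pressure in kPa.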
# UNITS: # o2 in umol/Kg # temp in Celsius (= potential temperature, NOT in situ) # sal in psu # depth in m # po2 returned in kPa def calc_po2(o2, temp, sal, depth): """Computes po2 from o2 [umol/kg], potential temperature [Celsius], salinity [psu], depth [m].""" a_0 = 5.80871 a_1 = 3.20291 a_2 = 4.17887 a_3 = 5.10006 a_4 = -9.86643e-2 a_5 = 3.80369 b_0 = -7.01577e-3 b_1 = -7.70028e-3 b_2 = -1.13864e-2 b_3 = -9.51519e-3 c_0 = -2.75915E-7 tt = 298.15 - temp tk = 273.15 + temp ts = np.log(tt / tk) #correct for pressure at depth V = 32e-6 #partial molar volume of O2 (m3/mol) R = 8.31 #Gas constant [J/mol/K] db2Pa = 1e4 #convert pressure: decibar to Pascal atm2Pa = 1.01325e5 #convert pressure: atm to Pascal #calculate pressure in dB from depth in m pres = depth*(1.0076+depth*(2.3487e-6 - depth*1.2887e-11)); #convert pressure from decibar to pascal dp = pres*db2Pa pCor = np.exp((V*dp)/(R*(temp+273.15))) o2_sat = np.exp(a_0 + a_1*ts + a_2*ts**2 + a_3*ts**3 + a_4*ts**4 + a_5*ts**5 + sal*(b_0 + b_1*ts + b_2*ts**2 + b_3*ts**3) + c_0*sal**2) o2_alpha = (o2_sat / 0.21) #0.21 is atmospheric composition of O2 kh = o2_alpha*pCor po2 = (o2 / kh)*101.32501 #convert po2 from atm to kPa return po2 # - # ### Define fxns for performing + plotting quotient analysis def quotient_analysis_full(ivfull,dvfull,ivname,dvname,nbins,nruns,plothists): # IV for indy var, DV for dep var iv = ivfull.where(dvfull.notnull()) dv = dvfull.where(ivfull.notnull()) ivminf = np.floor(10*iv.min())/10 ivmaxc = np.ceil(10*iv.max())/10 binedges = np.linspace(ivminf,ivmaxc,nbins+1) if plothists==1: fig,axes = plt.subplots(figsize=(7,5),nrows=2,ncols=2) ivfull.plot.hist(ax=axes[0][0],bins=20); axes[0][0].set_title('IV - all values'); iv.plot.hist(ax=axes[0][1],bins=20); axes[0][1].set_title('IV - assoc w/ DV values'); dvfull.plot.hist(ax=axes[1][0],bins=20); axes[1][0].set_title('DV - all values'); dv.plot.hist(ax=axes[1][1],bins=20); axes[1][1].set_title('DV - assoc w/ IV values'); fig.tight_layout() dsqa = xr.merge([iv, dv]) dfqa = dsqa.to_dataframe() dfqa.rename(columns={ivname: 'IV', dvname: 'DV'}, inplace=True) dfqa.reset_index(inplace=True) dfqa = dfqa.dropna(subset=['IV','DV'], how='any') ivcounts, dvcounts, dvquot = quotient_analysis(dfqa,binedges) # Bernal et al. 2007: replace=FALSE in the code, but in the paper it says replace=TRUE # --> I think I'll go w/ what the paper says? 
though difference # between the two methods is small dfsimreplaceF = pd.DataFrame(); dfsimreplaceT = pd.DataFrame() for i in range(nruns): dfsimreplaceF=pd.concat([dfsimreplaceF,dfqa['DV'].sample( n=len(dfqa['DV']), replace=False).reset_index( drop=True)], axis=1).rename(columns={'DV':i}) dfsimreplaceT=pd.concat([dfsimreplaceT,dfqa['DV'].sample( n=len(dfqa['DV']), replace=True).reset_index( drop=True)], axis=1).rename(columns={'DV':i}) dfsimreplaceF = dfsimreplaceF.assign(IV=dfqa['IV'].values) dfsimreplaceT = dfsimreplaceT.assign(IV=dfqa['IV'].values) quotsimreplaceF = pd.DataFrame(); quotsimreplaceT = pd.DataFrame() for i in range(nruns): _,_,quotsimreplaceFtemp = quotient_analysis( dfsimreplaceF[[i,'IV']].rename(columns={i:'DV'}),binedges) quotsimreplaceF = pd.concat([quotsimreplaceF,quotsimreplaceFtemp], axis=1) _,_,quotsimreplaceTtemp = quotient_analysis( dfsimreplaceT[[i,'IV']].rename(columns={i:'DV'}),binedges) quotsimreplaceT = pd.concat([quotsimreplaceT,quotsimreplaceTtemp], axis=1) qlimsreplaceT = quotsimreplaceT.quantile([0.025, 0.975], axis=1) qlimsreplaceF = quotsimreplaceF.quantile([0.025, 0.975], axis=1) bincenters = (binedges[1:] + binedges[:-1])/2 return dfqa['IV'], binedges, bincenters, ivcounts, dvcounts, dvquot, qlimsreplaceT, qlimsreplaceF def quotient_analysis(dfqa,binedges): # IV for indy var, DV for dep var dfqa['IV_bin'] = pd.cut(dfqa['IV'],binedges) ivcounts = dfqa.groupby('IV_bin')['IV'].count() dvcounts = dfqa.groupby('IV_bin')['DV'].sum() ivfreq = ivcounts*100/ivcounts.sum() dvfreq = dvcounts*100/dvcounts.sum() dvquot = dvfreq/ivfreq return ivcounts, dvcounts, dvquot def define_atp(row): if row['quot']=row['2pt5p'] and row['quot']<=row['97pt5p']: return 0 # tolerance if row['quot']>row['97pt5p']: return 1 # preference def get_dfatp(qlimsreplaceT,dvquot,binedges): dfatp = pd.concat([qlimsreplaceT.transpose(),dvquot], axis=1) dfatp = dfatp.rename(columns={0:'quot', 0.025:'2pt5p', 0.975:'97pt5p'}) dfatp['ATP'] = dfatp.apply(define_atp, axis=1) dfatp.reset_index(inplace=True) dfatp = pd.concat([dfatp,pd.Series(binedges[0:-1],name='lbinedges'), pd.Series(binedges[1:],name='rbinedges')], axis=1) return dfatp # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # this notebook will extract the relevant dataset from Kaggle dataset to predict default import pandas as pd df = pd.read_csv('loan.csv') len(df) df.head() df.columns df[df.index<10].to_csv('sample.csv') df.loan_status.unique() in_default = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'Does not meet the credit policy. 
Status:Charged Off'] len(in_default) df['in_default'] = df.loan_status.map(lambda x: 1 if x in in_default else 0) df.in_default.unique() len(df[df.in_default==1]) / len(df) cols = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'term', 'int_rate', 'installment', 'grade', 'sub_grade', 'emp_title', 'emp_length', 'home_ownership', 'annual_inc', 'verification_status', 'issue_d', 'loan_status', 'pymnt_plan', 'desc', 'purpose', 'title', 'zip_code', 'addr_state', 'dti', 'delinq_2yrs', 'earliest_cr_line', 'inq_last_6mths', 'mths_since_last_record', 'open_acc', 'pub_rec', 'revol_bal', 'revol_util', 'total_acc', 'initial_list_status', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'last_pymnt_amnt', 'next_pymnt_d', 'last_credit_pull_d', 'collections_12_mths_ex_med', 'mths_since_last_major_derog', 'policy_code', 'application_type', 'annual_inc_joint', 'dti_joint', 'verification_status_joint', 'in_default'] df[cols].to_csv('default.csv', index=False) # + # df1 = pd.DataFrame(cols) # + # df1.to_csv('desc.csv', index=False) # - # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # String Formatting (Interactive) # # This is an interactive tutorial. You can click the rocket -> binder link at the top of this page to interact with the code and outputs below. # # ## Why Format # # Often string formatting is used to truncate part of a string or round floating point numbers to a desired precision. All of the techniques below use a special shorthand for specifying the formatting desired. # # For example: # # To round pi to 4 decimal points, we would use the float formatter, `f` and specify `.4` decimal places. This will round to the number of decimal places specified. # # # + tags=[] pi = 3.14159265 print("Pi is %.4f" % pi) # - # To specify a certain number of characters width, you can also put a number before the decimal point in the format code. # # For example: # # To print the following lines as 10 characters each, we would specify `%10.f` as the format code and Python will automatically add spaced in front to make the output 10 characters long: # + tags=[] print("%10.f" % 1) print("%10.f" % 10) print("%10.f" % 100) print("%10.f" % 1000) print("%10.f" % 10000) # - # ## Three types of string formatting # # Python allows you to specify how to format strings in 3 ways: # # * The Old school (% operator): `" "%()` # * The `.format()` method: `" ".format()` # * With an f-string (Python version > 3.6 only): `f" "` # # Useful sites: # # - Python string docs [link](https://docs.python.org/3/library/string.html) # - Using % and .format(): [link](https://pyformat.info) # - Python f-strings [link](http://zetcode.com/python/fstring/) # ## Old school (%) # # Below we use the `%` operator after the string to include the three elements into the three locations denoted by `%` within the string. # # We use three different format codes for the three numbers included in the string: # # - The integer format code `d` # - The float format code rounding to a whole number `.0f` # - The float format code rounding to 2 decimal places `.2f`. # # Note: To print a regular %, you need to type two %% in a row when using the `%` operator. # + tags=[] discount = 30 # % price = 499.99 new_price = price * (1-discount/100) print("SALE: Get %d%% off the new PS5 and pay $%.0f instead of $%.2f." 
% (discount, new_price, price)) # - # ## Format method (.format()) # # The format method inserts each element in the parentheses of `.format()`, into the brackets in the string. The same format codes used above are included after the colon in each set of brackets `"{:fmt}"`. # + tags=[] print("SALE: Get {:d}% off the new PS5 and pay ${:.0f} instead of ${:.2f}." .format(discount, new_price, price)) # - # ## F-string (f'') # # The f-string allows us to include the element in place in the string instead of in a list at the end. This can make it easier to read which values are being inserted into which parts of the string. The syntax is `{value:fmt}` with the value being specified before the colon and the format code after the colon in the `{}` brackets. # + tags=[] print(f"SALE: Get {discount:d}% off the new PS5 and pay ${new_price:.0f} instead of ${price:.2f}.") # - # ## String formatting with loop # # We can loop through the following two lists of values and print each pair with a format string. # + tags=[] month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun'] balance = [-10.45, 50.99, 0.99, -5.76, 100.57, -78.22] print('Monthly Balance') for mo, bal in zip(month, balance): print(f'We have ${bal:.2f} left in {mo}.') # - # This looks a little messy. We can clean up our output by: # # - Specifying the width of the number field. Since the longest output is 6 characters, we can use the code `6.2f` to have Python pad all of the numbers to 6 characters long # - We can also make a 30-character underline with the `-^30` short hand. # + tags=[] # Monthly Balance is centered on a 30 char line of --- print('{:-^30}'.format('Monthly Balance')) for mo, bal in zip(month, balance): print(f'We have $ {bal:6.2f} left in {mo}.') # Here we can make a 30 char line of === print(f'{"":=^30}') print('Total balance for the first {} months was $ {:.2f}'.format(len(month), sum(balance))) # - # ## Practice: multiplication table # # Try to exactly reproduce the table below by using a loop, print statements and string format codes. # # Note: Pay attention to spacing, zeros in front of the single digit numbers, and the horizontal and vertical lines # # ```{code-block} # 01 |02 03 04 05 06 07 08 09 # ------------------------------------ # 02 |04 06 08 10 12 14 16 18 # 03 |06 09 12 15 18 21 24 27 # 04 |08 12 16 20 24 28 32 36 # 05 |10 15 20 25 30 35 40 45 # 06 |12 18 24 30 36 42 48 54 # 07 |14 21 28 35 42 49 56 63 # 08 |16 24 32 40 48 56 64 72 # 09 |18 27 36 45 54 63 72 81 # ``` # ```{admonition} Click the button to reveal ONE of the answers! # :class: dropdown # # ```{code-block} python # import numpy as np # for i in range(1,10): # j = np.arange(1,10) # # for idx, ans in enumerate(i*j): # if idx!=8: # print(f'{ans:02d} ', end='') # else: # print(f'{ans:02d} ') # # if idx==0: # print('|', end='') # # if i == 1: # print('{}'.format('-'*(4*9)) ) # ``` # ``` # + # Create your multiplication table here! # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import json import math import numpy as np import openrtdynamics2.lang as dy from vehicle_lib.vehicle_lib import * # - # load track data with open("track_data/simple_track.json", "r") as read_file: track_data = json.load(read_file) # + # # Demo: a vehicle controlled to follow a given path # # Implemented using the code generator openrtdynamics 2 - https://pypi.org/project/openrtdynamics2/ . 
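# The plant used further below is a discrete-time kinematic bicycle model
# (discrete_time_bicycle_model from vehicle_lib). For reference, here is a
# plain-Python Euler-step sketch of that standard model; it is an illustration
# only, not the vehicle_lib implementation. The steering and velocity values are
# made up, while Ts = 0.01 and wheelbase = 3.0 match the parameters used below.
import math

def bicycle_step(x, y, psi, steering, velocity, Ts=0.01, wheelbase=3.0):
    # one Euler step of the standard kinematic bicycle model
    x_dot   = velocity * math.cos(psi)
    y_dot   = velocity * math.sin(psi)
    psi_dot = velocity / wheelbase * math.tan(steering)
    return x + Ts * x_dot, y + Ts * y_dot, psi + Ts * psi_dot

# 100 steps with a small constant steering angle curve the trajectory to the left
state = (0.0, 0.0, 0.0)
for _ in range(100):
    state = bicycle_step(*state, steering=0.05, velocity=10.0)
print(state)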
# This generates c++ code for Web Assembly to be run within the browser. # system = dy.enter_system() velocity = dy.system_input( dy.DataTypeFloat64(1), name='velocity', default_value=23.75, value_range=[0, 25], title="vehicle velocity") k_p = dy.system_input( dy.DataTypeFloat64(1), name='k_p', default_value=2.0, value_range=[0, 10.0], title="controller gain") disturbance_amplitude = dy.system_input( dy.DataTypeFloat64(1), name='disturbance_amplitude', default_value=20.0, value_range=[-45, 45], title="disturbance amplitude") * dy.float64(math.pi / 180.0) sample_disturbance = dy.system_input( dy.DataTypeInt32(1), name='sample_disturbance', default_value=50, value_range=[0, 300], title="disturbance position") # parameters wheelbase = 3.0 # sampling time Ts = 0.01 # create storage for the reference path: path = import_path_data(track_data) # create placeholders for the plant output signals x = dy.signal() y = dy.signal() psi = dy.signal() # track the evolution of the closest point on the path to the vehicles position projection = track_projection_on_path(path, x, y) d_star = projection['d_star'] # the distance parameter of the path describing the closest point to the vehicle x_r = projection['x_r'] # (x_r, y_r) the projected vehicle position on the path y_r = projection['y_r'] psi_r = projection['psi_r'] # the orientation angle (tangent of the path) K_r = projection['K_r'] # the curvature of the path Delta_l = projection['Delta_l'] # the lateral distance between vehicle and path tracked_index = projection['tracked_index'] # the index describing the closest sample of the input path # reference for the lateral distance Delta_l_r = dy.float64(0.0) # zero in this example dy.append_output(Delta_l_r, 'Delta_l_r') # feedback control u = dy.PID_controller(r=Delta_l_r, y=Delta_l, Ts=0.01, kp=k_p) # path tracking # resulting lateral model u --> Delta_l : 1/s Delta_u = dy.asin( dy.saturate(u / velocity, -0.99, 0.99) ) delta_star = psi_r - psi steering = delta_star + Delta_u steering = dy.unwrap_angle(angle=steering, normalize_around_zero = True) dy.append_output(Delta_u, 'Delta_u') dy.append_output(delta_star, 'delta_star') # # The model of the vehicle including a disturbance # # model the disturbance disturbance_transient = np.concatenate(( cosra(50, 0, 1.0), co(10, 1.0), cosra(50, 1.0, 0) )) steering_disturbance, i = dy.play(disturbance_transient, start_trigger=dy.counter() == sample_disturbance, auto_start=False) # apply disturbance to the steering input disturbed_steering = steering + steering_disturbance * disturbance_amplitude # steering angle limit disturbed_steering = dy.saturate(u=disturbed_steering, lower_limit=-math.pi/2.0, upper_limit=math.pi/2.0) # the model of the vehicle x_, y_, psi_, x_dot, y_dot, psi_dot = discrete_time_bicycle_model(disturbed_steering, velocity, Ts, wheelbase) # close the feedback loops x << x_ y << y_ psi << psi_ # # outputs: these are available for visualization in the html set-up # dy.append_output(x, 'x') dy.append_output(y, 'y') dy.append_output(psi, 'psi') dy.append_output(steering, 'steering') dy.append_output(x_r, 'x_r') dy.append_output(y_r, 'y_r') dy.append_output(psi_r, 'psi_r') dy.append_output(Delta_l, 'Delta_l') dy.append_output(steering_disturbance, 'steering_disturbance') dy.append_output(disturbed_steering, 'disturbed_steering') dy.append_output(tracked_index, 'tracked_index') # generate code for Web Assembly (wasm), requires emcc (emscripten) to build code_gen_results = dy.generate_code(template=dy.TargetWasm(enable_tracing=False), 
folder="generated/path_following_control", build=True) # dy.clear() # - import IPython IPython.display.IFrame(src='../vehicle_control_tutorial/path_following_control.html', width='100%', height=1000) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simple gate-level operations # This notebook demonstrates some of the simple wrapping provided to examine the values of cascaded gate operations. Support is provided to manipulate the pybind11 wrapped matrices (DCM), or mapping them directly to numpy types, allowing use of the full scipy ecosystem. # %matplotlib notebook from matplotlib import rc rc('text', usetex=True) import matplotlib.pyplot as plt # + # Load the wrqpped simulator as before from PyQNLPSimulator import PyQNLPSimulator as p # Load the wrapped 2x2 matrix type; dcm => double complex matrix from PyQNLPSimulator import DCMatrix as dcm # Load the QNLP python packages import QNLP as q # Useful for examining various product types from itertools import product # Load numpy and matrix-based functions from scipy import numpy as np from scipy.linalg import expm, sinm, cosm # - # Define a matrix using lexicographical ordering ll=[0,1,1,0] m = dcm(ll) # Using the matrix `m` we can perform a variety of oeprations using the underlying C++ wrapped methods. # # Note: We do not restrict ourselves to these being unitary. print(m*m) print(m + m) print(2*m - 5*m) # Additionally, to avail of the numpy/scipy ecosystem, it makes sense to have conversions between the numpy datatypes and the above matrices. This allows using to perform a variety of useful operations. # Create numpy variant nm = m.as_numpy() # Below we demonstrate comparisons between both types, and the operations available to each. We can load the matrices directly using the 'DCM' type, from the PyQNLPSimulator class 'GateOps', or access them from an initialised simulator object. # + from PyQNLPSimulator import GateOps sim = p(3, False) gops = GateOps(sim) #Direct from simulator X = sim.getGateX() Y = sim.getGateY() Z = sim.getGateZ() H = sim.getGateH() #GateOps access Xgo = gops.X #Numpy variant Xnp = np.matrix(X.as_numpy()) Hnp_go = gops.Hnp # - print(H * X) print(Hnp_go * Xnp) # Using the numpy variants, we can explicitly pass our matrices to the matrix functions loaded from `scipy.linalg`. In this case, we calculate $\exp(iH)$, where $H$ is the Hadamard gate. expm(1j*H.as_numpy()) # We can thus define rotation matrices from the above gate definitions using the $R_a(\theta) = \exp\left(-\frac{i\theta a}{2}\right)$ definition. 
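# For instance, expanding the exponential for $a = \sigma_y$ gives the familiar real-valued rotation
#
# $$R_y(\theta) = \exp\left(-\frac{i\theta \sigma_y}{2}\right) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix},$$
#
# which is the matrix that the `RY(theta)` helper below evaluates numerically via `expm`.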
The below listed methods are all available within the `GateOps` class def RX(theta): return np.matrix(expm(-0.5*1j*theta*X.as_numpy())) def RY(theta): return np.matrix(expm(-0.5*1j*theta*Y.as_numpy())) def RZ(theta): return np.matrix(expm(-0.5*1j*theta*Z.as_numpy())) def RHam(theta, Ham): return np.matrix(expm(-0.5*1j*theta*Ham.as_numpy())) # Looking at the application of this matrix on a simple vector can be performed as follows: for i in range(10): data = (RY(np.pi*(i/10))*np.array([[1],[0]])) print(data) # We can visualise this rotation as follows: arr = np.array([[1],[0]]) for i in range(10): arr = RY(np.pi/10)*arr arr bin(296) # + from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm, colors fig = plt.figure(figsize=plt.figaspect(1)) ax = fig.add_subplot(111, projection='3d') # Bloch sphere r = 0.99 phi, theta = np.mgrid[0.0:np.pi:100j, 0.0:2.0*np.pi:100j] x0 = r*np.sin(phi)*np.cos(theta) y0 = r*np.sin(phi)*np.sin(theta) z0 = r*np.cos(phi) ax.plot_surface(x0, y0, z0, color='violet', linewidth=1.0, alpha=0.55) data = np.array([[0],[1]]) arrow0 = ax.quiver(0,0,0, np.real(data[0]), 0, np.real(data[1]), length=1.0, color='black') for i in range(10): data = (RY(np.pi*(i/10))*np.array([[1],[0]])) arrow = ax.quiver(0,0,0, np.real(data[0]), 0, np.real(data[1]), length=1.0) ax.set_xlim([-1,1]) ax.set_ylim([-1,1]) ax.set_zlim([-1,1]) #ax.set_aspect("equal") plt.tight_layout() ax.grid(False) plt.pause(0.05) plt.draw() # - # To examine the product of these gates, as would be offered by the quantum circuit model, we can create tensor products as: # We can, as demonstrated previously, raise these to exponents. Care should be taken though, as Kronecker products grow the memory footprint greatly. s1 = np.kron(gops.Inp, gops.Znp) s2 = 0.5*(gops.Znp + gops.Inp) s3 = np.kron(Xnp, np.kron(s2,s1)) s3 # where the matrix `s3` is given by $\mathbf{I}\otimes(\vert 0 \rangle\langle 0 \vert )\otimes(\mathbf{I}\otimes\sigma_z)$. aa = expm(-0.5*1j*np.pi*s3) fig = plt.figure() # + plt.subplot(121) plt.imshow(abs(aa)**2) plt.colorbar() plt.subplot(122) plt.imshow(np.angle(aa)) plt.colorbar() plt.show() # - # Next, we can demonstrate the usage of the `UnitaryFinder` class. As an example, say we are given an arbitrary 2x2 unitary matrix, and a set of gates implemenetd on our simulator/hardware platform. We cannot directly implement the given gate, but would like to decompose it (e.g. compile it) to the available gate-set. Using the matrix form given by `createS(i)`, we attempt to find the optimal gates to create this matrix. def createS(i): a = np.sqrt((i-1)/i) b = 1/np.sqrt(i) return dcm([a,b,-b,a]) np.matrix(createS(4).as_numpy()).flatten().tolist()[0] from PyQNLPSimulator import UnitaryFinder # Creating the UnitaryFinder object, we give the S matrix as an argument uf = UnitaryFinder(sim, np.matrix(createS(4).as_numpy()).flatten().tolist()[0]) # Next, we generate all possible combinations of gates in the given gate-set (taken from GateOps) up to a given depth. gate_op_combo = uf.genOps(uf.gates, depth=3) print(gate_op_combo[0:4]) # Lastly, we iterate through this gate combination list, and aim to find the gate combinations that best match our target matrix, to within a given tolerance. for i in gate_op_combo: res, vals = uf.findOps(i) if res: print(i, vals) # The values returned above list the gate combinations (applied right to left) and the parameters (angles for rotation matrices) that best-approximated the given unitary. 
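# As a standalone numerical sanity check on one such candidate (plain numpy, the
# $S_4$ target written out explicitly, assuming the row-major [a, b, -b, a] layout
# used by createS):

import numpy as np

def ry(theta):
    # standard R_y(theta) rotation matrix
    return np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2),  np.cos(theta / 2)]])

sigma_x = np.array([[0.0, 1.0], [1.0, 0.0]])
s4      = np.array([[np.sqrt(3) / 2, 0.5], [-0.5, np.sqrt(3) / 2]])

print(np.allclose(sigma_x @ ry(np.arccos(1 / 2)) @ sigma_x, s4))   # True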
# # For our example above of S4, some simple arithmetic shows that $S4 = \sigma_x RY(\cos^{-1}(1/2))\sigma_x$. A further investigation gives us a relationship for $S_i = \sigma_x RY(\cos^{-1}((i-2)/2)))\sigma_x$ as one of many possible combinations. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''venv'': venv)' # name: python_defaultSpec_1598173300831 # --- # + import math import arviz as az import matplotlib.pyplot as plt import pandas as pd from IPython.display import set_matplotlib_formats import jax.numpy as jnp from jax import lax, random from jax.scipy.special import expit, logit import numpyro import numpyro.distributions as dist import numpyro.optim as optim from numpyro.diagnostics import print_summary from numpyro.distributions.transforms import OrderedTransform from numpyro.infer import ELBO, MCMC, NUTS, SVI, Predictive, init_to_value from numpyro.infer.autoguide import AutoLaplaceApproximation # %config InlineBackend.figure_formats = ["svg"] az.style.use("arviz-darkgrid") numpyro.set_host_device_count(4) # + ratings_counts = jnp.array([12, 36, 7, 41]) pr_r = ratings_counts / ratings_counts.sum() cpr_r = jnp.cumsum(pr_r, -1) cpr_r # - lco_r = logit(cpr_r) lco_r plt.plot(range(1, 5), cpr_r, "--o") plt.gca().set(xlabel="rank", ylabel="cumulative proportion", ylim=(-0.1, 1.1)) plt.show() # + tags=[] hurricanes = pd.read_csv("../data/Hurricanes.csv", sep=";") d = hurricanes print(d.shape) display(d.sample(3)) d.describe() # - d.category.value_counts().sort_index() # # ## Hurricane fatalities and gender of names # # ### Description # # Data used in Jung et al 2014 analysis of effect of gender of name on hurricane fatalities. Note that hurricanes Katrina (2005) and Audrey (1957) were removed from the data. # # ### Format # # name : Given name of hurricane # year : Year of hurricane # deaths : number of deaths # category : Severity code for storm # min_pressure : Minimum pressure, a measure of storm strength; low is stronger # damage_norm : Normalized estimate of damage in dollars # female : Indicator variable for female name # femininity : 1-11 scale from totally masculine (1) to totally feminine (11) for name. Average of 9 scores from 9 raters. 
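# A quick sense of scale for the Poisson model fit below (model1): at the prior
# means (a = 0, bF = 0.2), an illustrative storm with femininity 7 gets a rate of
# lambda = exp(0 + 0.2 * 7) = exp(1.4), roughly 4.1 expected deaths, which is the
# kind of curve the prior-predictive plot below traces over the femininity range.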
# import seaborn as sns sns.pairplot(d[['femininity', 'category', 'min_pressure', 'damage_norm', 'deaths']]) # + tags=[] def model1(deaths=None, femininity=None): a = numpyro.sample("a", dist.Normal(0, .3)) bF = numpyro.sample("bF", dist.Normal(0.2, .25)) lambda_ = numpyro.deterministic("lambda", jnp.exp(a + bF * femininity)) numpyro.sample("deaths", dist.Poisson(lambda_), obs=deaths) # + x = jnp.linspace(d.femininity.min(), d.femininity.max(), 100) lmu = Predictive(model1, num_samples=100, return_sites=["lambda"]).get_samples( random.PRNGKey(53), femininity=x, )["lambda"] plt.axhline(y=d.deaths.min(), c="k", ls="--") plt.axhline(y=d.deaths.max(), c="k", ls="--")#, lw=0.5) plt.title("prior pred") plt.xlabel('femininity') for i in range(40): plt.plot(x, lmu[i], "k", alpha=0.3) # + tags=[] mcmc = MCMC(NUTS(model1), 500, 500) mcmc.run(random.PRNGKey(0), femininity=d.femininity.values, deaths=d.deaths.values) mcmc.print_summary() post1 = mcmc.get_samples() # + x = jnp.linspace(d.femininity.min(), d.femininity.max(), 100) pred = Predictive(model1, post1, return_sites=["lambda"]).get_samples(random.PRNGKey(33), femininity=x) lmu = jnp.mean(pred['lambda'], 0) lci = jnp.percentile(pred['lambda'], q=(5.5, 94.5), axis=0) plt.scatter(d.femininity.values, d.deaths.values) plt.plot(x, lmu, 'k') plt.fill_between(x, lci[0], lci[1], color='k', alpha=0.2) # - def model2(deaths=None): a = numpyro.sample("a", dist.Normal(0, .3)) lambda_ = numpyro.deterministic("lambda", jnp.exp(a)) numpyro.sample("deaths", dist.Poisson(lambda_), obs=deaths) # + tags=[] mcmc = MCMC(NUTS(model2), 500, 500) mcmc.run(random.PRNGKey(0), deaths=d.deaths.values) mcmc.print_summary() post2 = mcmc.get_samples() # - def model3(deaths=None, femininity=None): a = numpyro.sample("a", dist.Normal(0, 5)) bF = numpyro.sample("bF", dist.Normal(0.2, .25)) phi = numpyro.sample("phi", dist.Exponential(1)) lambda_ = numpyro.deterministic("lambda", jnp.exp(a + bF * femininity)) numpyro.sample("deaths", dist.GammaPoisson(lambda_ / phi, 1 / phi), obs=deaths) # + tags=[] mcmc = MCMC(NUTS(model3), 500, 500) mcmc.run(random.PRNGKey(0), femininity=d.femininity.values, deaths=d.deaths.values) mcmc.print_summary() post3 = mcmc.get_samples() # + x = jnp.linspace(d.femininity.min(), d.femininity.max(), 100) pred = Predictive(model3, post3, return_sites=["lambda"]).get_samples(random.PRNGKey(33), femininity=x) lmu = jnp.mean(pred['lambda'], 0) lci = jnp.percentile(pred['lambda'], q=(5.5, 94.5), axis=0) plt.scatter(d.femininity.values, d.deaths.values) plt.plot(x, lmu, 'k') plt.fill_between(x, lci[0], lci[1], color='k', alpha=0.2) # - deaths_pred = Predictive(model3, post3)(random.PRNGKey(123), femininity=d.femininity.values)["deaths"] plt.scatter(d.femininity.values, d.deaths.values) plt.errorbar( d.femininity.values, jnp.mean(deaths_pred, 0), jnp.std(deaths_pred, 0) / 2, fmt="o", c="k", mfc="none", ms=7, elinewidth=1, ) plt.plot(d.femininity.values, jnp.percentile(deaths_pred, 5.5, 0), "k+") plt.plot(d.femininity.values, jnp.percentile(deaths_pred, 94.5, 0), "k+") # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # read in ECCO LLC data and regrid to uniform grid for saving as netcdf and plotting # # - first pip install stuff & restart kernal # #!pip install git+https://github.com/rabernat/xmitgcm.git@fix-llcreader-klevels-bug # !pip install ecco_v4_py # import libs # + import numpy as 
np import cmocean import xarray as xr import ecco_v4_py as ecco import xmitgcm.llcreader as llcreader # %matplotlib inline import holoviews as hv #from holoviews.operation.datashader import regrid #hv.extension('bokeh') import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (15,10) # %matplotlib inline import holoviews as hv from holoviews.operation.datashader import regrid hv.extension('bokeh') # - model = llcreader.ECCOPortalLLC2160Model() ds = model.get_dataset(k_levels=[0], type='latlon') ds ds ## read in data and get Theta model = llcreader.ECCOPortalLLC2160Model() ds_sst = model.get_dataset(varnames=['Theta'], k_levels=[0], type='latlon') ds_sst # ## regrid data onto new uniform grid using eccopy ds_sst.Theta.isel(time=-1,i=slice(5000,7000),j=slice(5000,6000)).plot() # + ecco_ds = ds_sst new_grid_delta_lat = 360/8640 new_grid_delta_lon = 360/8640 new_grid_min_lat = -90+new_grid_delta_lat/2 new_grid_max_lat = 90-new_grid_delta_lat/2 new_grid_min_lon = -180+new_grid_delta_lon/2 new_grid_max_lon = 180-new_grid_delta_lon/2 new_grid_lon, new_grid_lat, field_nearest_1deg =\ ecco.resample_to_latlon(ecco_ds.XC, \ ecco_ds.YC, \ ecco_ds.Theta.isel(time=-1),\ new_grid_min_lat, new_grid_max_lat, new_grid_delta_lat,\ new_grid_min_lon, new_grid_max_lon, new_grid_delta_lon,\ fill_value = np.NaN, \ mapping_method = 'nearest_neighbor', radius_of_influence = 120000) da = xr.DataArray(field_nearest_1deg,name='Theta',coords={'lat':new_grid_lat[:,0],'lon':new_grid_lon[0,:]},dims=('lat','lon')) da.to_netcdf('./../data/ecco2013_04_22_latlon360Theta.nc') da.coords['lon'] = np.mod(da['lon'], 360) da = da.sortby(da.lon) # output data da.to_netcdf('./../data/ecco2013_04_22_latlonTheta.nc') # - # ## switch from -180-180 to 0-360 to look at pacific ocean # ## start from here if just plotting existing data import cartopy import cartopy.crs as ccrs import xarray as xr import matplotlib.pyplot as plt #da = xr.open_dataset('./../data/ecco2011_11_19_latlon.nc') da = xr.open_dataset('./../data/ecco2013_04_22_latlonTheta.nc') plt.figure(figsize=(18,14)) #set the figure size ax = plt.subplot(1, 1, 1) #, projection=ortho) #create the axis for plotting q = da.sel(lat=slice(-60,-28),lon=slice(10,90)).Theta.plot(ax=ax, cmap='turbo', vmin=-1, vmax=22) # plot a colormap in transformed coordinates cmap='OrRd', plt.savefig('./../figures/theta_oneill_ortho_turbo.png') plt.figure(figsize=(18,7)) #set the figure size ax = plt.subplot(1, 1, 1) #, projection=ortho) #create the axis for plotting q = da.sel(lat=slice(45,63),lon=slice(180,242)).Theta.plot(ax=ax, cmap='turbo', vmin=2, vmax=9) # plot a colormap in transformed coordinates cmap='OrRd', plt.savefig('./../figures/theta_goa_ortho_turbo.png') plt.figure(figsize=(18,7)) #set the figure size ax = plt.subplot(1, 1, 1, projection=ccrs.Orthographic(180, 10)) #create the axis for plotting q = da.Theta.plot(ax=ax, cmap='turbo', transform=ccrs.PlateCarree(), vmin=2, vmax=9) # plot a colormap in transformed coordinates cmap='OrRd', ax.add_feature(cartopy.feature.COASTLINE) global_extent = ax.get_extent(crs=ccrs.PlateCarree()) gg = global_extent[:2] + (45,62) gg = (180,242)+gg[2:] ax.set_extent(gg, crs=ccrs.PlateCarree()) ax.add_feature(cartopy.feature.LAND) plt.savefig('./../figures/theta_goa_ortho_turbo2.png') # + import cartopy import cartopy.crs as ccrs ortho = ccrs.Orthographic(-170, 20) # define target coordinate frame geo = ccrs.PlateCarree() # define origin coordinate frame plt.figure(figsize=(18,14)) #set the figure size ax = plt.subplot(1, 1, 1, projection=ortho) 
#create the axis for plotting q = da.Theta.plot(ax=ax, transform = geo, cmap='turbo', vmin=2, vmax=9) # plot a colormap in transformed coordinates cmap='OrRd', ax.add_feature(cartopy.feature.COASTLINE) ax.add_feature(cartopy.feature.COASTLINE) plt.savefig('./../figures/theta_goa_ortho_turbo2.png') # - # ## you can also just plot data even if not on a regular grid plt.rcParams['figure.figsize'] = (20,10) ds_sst.Theta.isel(k=0,time=6200,j=slice(2500,6480),i=slice(3000,8000)).plot(vmin=5,vmax=29,cmap='turbo') plt.savefig('./../figures/theta_global.png') plt.rcParams['figure.figsize'] = (20,10) ds_sst.Theta.isel(k=0,time=6200,j=slice(2500,6480),i=slice(3000,8000)).plot(vmin=5,vmax=28,cmap='turbo') plt.savefig('./../figures/theta_global2.png') plt.rcParams['figure.figsize'] = (20,10) ds_sst.Theta.isel(k=0,time=6200,j=slice(2500,6480),i=slice(3000,8000)).plot(vmin=5,vmax=28,cmap='coolwarm') plt.savefig('./../figures/theta_global2_coolwarm.png') # + plt.rcParams['figure.figsize'] = (20,10) ds_sst.Theta.isel(k=0,time=6200,j=slice(2500,6480),i=slice(3000,8000)).plot(vmin=5,vmax=28,cmap=cmocean.cm.thermal) plt.savefig('././figures/theta_global2_thermal.png') # - # ### EXPLORE DATA ds_all = model.get_dataset(k_levels=[0], type='latlon') ds_all plt.rcParams['figure.figsize'] = (20,10) ds_all.oceQnet.isel(time=6200,j=slice(2500,6480),i=slice(3000,8000)).plot(vmin=-200,vmax=400,cmap=cmocean.cm.thermal) plt.savefig('./../figures/theta_global2_heatflux_thermal2.png') # + #testing for when to look dataset = hv.Dataset(ds_sst.Theta.isel(k=0,j=slice(3500,6100),i=slice(3000,8000),time=slice(1,18000,240)).astype('f4')) hv_im = (dataset.to(hv.Image, ['i', 'j'], dynamic=True) .options(cmap='Magma', width=950, height=600, colorbar=True)) # %output holomap='scrubber' fps=3 regrid(hv_im, precompute=True) # - # # make more data netcdf files # ## read in data and get Theta # # + model = llcreader.ECCOPortalLLC2160Model() ds_sst = model.get_dataset(varnames=['oceQnet'], k_levels=[0], type='latlon') ecco_ds = ds_sst new_grid_delta_lat = 360/8640 new_grid_delta_lon = 360/8640 new_grid_min_lat = -90+new_grid_delta_lat/2 new_grid_max_lat = 90-new_grid_delta_lat/2 new_grid_min_lon = -180+new_grid_delta_lon/2 new_grid_max_lon = 180-new_grid_delta_lon/2 new_grid_lon, new_grid_lat, field_nearest_1deg =\ ecco.resample_to_latlon(ecco_ds.XC, \ ecco_ds.YC, \ ecco_ds.oceQnet.isel(time=6200),\ new_grid_min_lat, new_grid_max_lat, new_grid_delta_lat,\ new_grid_min_lon, new_grid_max_lon, new_grid_delta_lon,\ fill_value = np.NaN, \ mapping_method = 'nearest_neighbor', radius_of_influence = 120000) da = xr.DataArray(field_nearest_1deg,name='oceQnet',coords={'lat':new_grid_lat[:,0],'lon':new_grid_lon[0,:]},dims=('lat','lon')) # output data da.to_netcdf('./data/ecco2011_11_19_latlon360oceQnet.nc') # - da # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline jagan = pd.read_csv('C:/Users/akano/Downloads\Machine-Learning-Using-Scikit-learn-and-other-libraries-master (2)/Machine-Learning-Using-Scikit-learn-and-other-libraries-master/1.Regression/data_jaganAnalysis.csv') tanco = pd.read_csv('C:/Users/akano/Downloads\Machine-Learning-Using-Scikit-learn-and-other-libraries-master 
(2)/Machine-Learning-Using-Scikit-learn-and-other-libraries-master/1.Regression/data_tanco1.csv') lemon = pd.read_csv('C:/Users/akano/Downloads\Machine-Learning-Using-Scikit-learn-and-other-libraries-master (2)/Machine-Learning-Using-Scikit-learn-and-other-libraries-master/1.Regression/data_lemonAnalysis.csv') tangelo = pd.read_csv('C:/Users/akano/Downloads\Machine-Learning-Using-Scikit-learn-and-other-libraries-master (2)/Machine-Learning-Using-Scikit-learn-and-other-libraries-master/1.Regression/data_tangeloAnalysis.csv') jagan.head() tanco.head() lemon.head() tangelo.head() jagan.describe() tanco.describe() lemon.describe() tangelo.describe() correlation_jagan = jagan.corr() correlation_tanco = tanco.corr() correlation_lemon = lemon.corr() correlation_tangelo = tangelo.corr() export_correlation_jagan = correlation_jagan export_correlation_jagan.to_csv("jagan result correlation") correlation_tanco correlation_lemon correlation_tangelo sns.heatmap(correlation_jagan) sns.heatmap(correlation_tanco) sns.heatmap(correlation_lemon) sns.heatmap(correlation_tangelo) sns.boxplot(data = jagan) sns.boxplot(data = tanco) sns.boxplot(data = lemon) sns.boxplot(data = tangelo) sns.boxplot(data = correlation_jagan) sns.boxplot(data = correlation_tanco) sns.boxplot(data = correlation_lemon) sns.boxplot(data = correlation_tangelo) jagan.columns ## Anova and p-value test import scipy.stats as stats from scipy.stats import ttest_ind from scipy.stats import f_oneway length = jagan['Length (mm)'] width = jagan['Width (mm)'] thickness = jagan['Thickness (mm)'] stats.f_oneway(length, width, thickness) x = tanco[['Length (mm)', 'Width (mm)', 'Thickness (mm)', 'Volume(mm3)']] y = tanco['Mass (g)'] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 101) # + x_train # - #fitting simple linear regression to the training set from sklearn.linear_model import LinearRegression regressor=LinearRegression() #fit the regressor regressor.fit(x_train,y_train) # + #y_train # + #y_test # - #predictig test results y_pred = regressor.predict(x_test) from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error mean_absolute_error(y_test, y_pred) mean_squared_error(y_test, y_pred) r2_score(y_test, y_pred) from sklearn import linear_model from sklearn. model_selection import cross_val_score lasso = linear_model.Lasso() print(cross_val_score(lasso, x, y, cv=5)) jagan.columns X = jagan[['Length (mm)', 'Width (mm)', 'Thickness (mm)', 'Volume(mm3)']] Y = jagan['Mass (g)'] # + from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state = 101, shuffle = True) #fitting simple linear regression to the training set from sklearn.linear_model import LinearRegression regressor=LinearRegression() #fit the regressor regressor.fit(X_train,Y_train) #predictig test results y_pred = regressor.predict(X_test) from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error # - mean_absolute_error(Y_test, y_pred) mean_squared_error(Y_test, y_pred) r2_score(Y_test, y_pred) from sklearn import linear_model from sklearn. 
model_selection import cross_val_score lasso = linear_model.Lasso() print(cross_val_score(lasso, x, y, cv=5)) X = lemon[['Length (mm)', 'Width (mm)', 'Thickness (mm)', 'Volume (mm3)']] Y = lemon['Mass (g)'] # + from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state = 101, shuffle = True) #fitting simple linear regression to the training set from sklearn.linear_model import LinearRegression regressor=LinearRegression() #fit the regressor regressor.fit(X_train,Y_train) #predictig test results y_pred = regressor.predict(X_test) from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error # - print(mean_absolute_error(Y_test, y_pred),mean_squared_error(Y_test, y_pred),r2_score(Y_test, y_pred) ) from sklearn import linear_model from sklearn. model_selection import cross_val_score lasso = linear_model.Lasso() print(cross_val_score(lasso, x, y, cv=5)) X1 = lemon[['Length (mm)', 'Width (mm)', 'Thickness (mm)', 'Volume (mm3)']] Y1 = lemon['Mass (g)'] # + from sklearn.model_selection import train_test_split X1_train, X1_test, Y1_train, Y1_test = train_test_split(X1, Y1, test_size = 0.1, random_state = 101, shuffle = True) #fitting simple linear regression to the training set from sklearn.linear_model import LinearRegression regressor=LinearRegression() #fit the regressor regressor.fit(X1_train,Y1_train) #predictig test results y_pred = regressor.predict(X1_test) from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error # - print(mean_absolute_error(Y1_test, y_pred),mean_squared_error(Y1_test, y_pred),r2_score(Y1_test, y_pred) ) from sklearn import linear_model from sklearn. model_selection import cross_val_score lasso = linear_model.Lasso() print(cross_val_score(lasso, x, y, cv=3)) parameter = tangelo[['Length (mm)', 'Width (mm)', 'Thickness (mm)', 'Volume (mm3)']] X = parameter X.shape Y.shape X = np.arange(0, len(X_train), 1) X = parameter Y = tangelo['Mass (g)'] # + from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state = 101, shuffle = True) #fitting simple linear regression to the training set from sklearn.linear_model import LinearRegression regressor=LinearRegression() #fit the regressor regressor.fit(X_train,Y_train) #predictig test results y_pred = regressor.predict(X_test) from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error # - print(mean_absolute_error(Y_test, y_pred),mean_squared_error(Y_test, y_pred),r2_score(Y_test, y_pred) ) X_train = np.arange(0, len(X_train), 1) X_train.shape # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Aufgabe: Bildergalerie generieren # # Generieren Sie eine einfache Bildergalerie mit Python! 
# # Bevor Sie etwas tun: **Erstellen Sie eine Kopie dieses Notebooks über *File -> Make a copy...*** # ## Vorbereitung # # Überlegen Sie zu zweit oder im Team # # - worin das Problem der Aufgabe besteht # - welche Zutaten Sie brauchen # - wie die Zutaten kombiniert werden müssen, um die Aufgabe zu lösen # # Nutzen Sie das Jupyter Notebook ["Die pythonische Küche"](https://collaborating.tuhh.de/itbh-inf-wise201718/jupyter-notebooks/blob/master/die_pythonische_kueche.ipynb) als Quelle der Inspiration. # ## Problem # # Worin besteht das Problem? Schreiben Sie mit Ihren Worten, was ein Programm leisten müsste, das die Aufgabe lösen soll. # # Die folgenden Fragen können Ihr Denken anleiten: # # - Wie sehen (einfache) Bildergalerien im Netz aus? # - Wie lassen sich die Regeln eines Systems mit Worten beschreiben, das eine solche Galerie generiert? # Fangen Sie in etwa so an: # # > "Um eine Bildergalerie zu generieren, müssen die Bilder bekannt sein, aus denen sie bestehen wird." # ## Zutaten # # Was brauchen Sie? Schauen Sie ggf. noch einmal in ["Die pythonische Küche"](https://collaborating.tuhh.de/itbh-inf-wise201718/jupyter-notebooks/blob/master/die_pythonische_kueche.ipynb). # ## Programm # # Jetzt kommt's! Versuchen Sie, den Code zu schreiben. Nur Mut, es kann nichts passieren. # + # Generator für eine Bildergalerie # - # ## Ergebnissicherung # # Sichern Sie zum Ende der Einheit dieses Notebook durch einen Export: *File -> Download as -> Notebook (.ipynb)*. # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # Open In Colab # + [markdown] id="4NNlA3ubQdd9" # # Assignment 7: # ## Implement merge sort that uses iterations and does not depend on recursions. # # + [markdown] id="WsQBmW_af15-" # # Iterative Merge Sort # + id="WtE9745O3htz" def merge_sort(List): size = 1 # size is the beginning size while size < len(List) -1: # this is a nested while loop. The outer loop is traversing for each sub array of the size left = 0 # left begins from 0 and eventually should be one less than its length while left < len(List)-1: # the middle index is minimum of the left index of the subarray which is iterable and key default which is length -1 middle = min((left + size -1), (len(List)-1)) # Conditional using size if 2 * size < len[List]-1 else len[List] -1 right = ((2*size + left-1, len(List)-1)[2*size + left-1 > len(List)-1]) # Function merge call merge(List, left, middle, right) left = left + size*2 #Increasing sub-array size by multiple of 2 size = 2 * size def merge(List, l, m, r): n1 = m-l+1 # numbers on the left n2 = r -m # numbers on the right L = [0]* n1 # list on the left R = [0]* n2 # list on the right for i in range (0, n1): L[i] = List[l+i] for i in range(0, n2): R[i] = List[m + i + 1] i, j, k = 0, 0, l # Begins the comparison and sorting with conditions. 
Indices increment while i < n1 and j < n2: if L[i] > R[j]: List[k] = R[j] j += 1 else: List[k] = L[i] i += 1 k += 1 while i < n1: List[k] = L[i] i +=1 k += 1 while j < n2: List[k] = R[j] j += 1 k += 1 # + colab={"base_uri": "https://localhost:8080/"} id="e9DBygFbZWjs" outputId="dd6dcb16-1c2e-476e-b3ac-54d1569d206f" List = [22, 11, 44, 55, 33] print("List: ") print (List) merge_sort(List) print("Sorted list: ") print(List) # + colab={"base_uri": "https://localhost:8080/"} id="fmXT8XqPXuZO" outputId="2c1c93d2-753b-4dff-d4ad-224e5a867630" L = [20, 51, 34, 95, 83, 15, 56, 76] print("List: ") print (L) merge_sort(L) print("Sorted list is ") print(L) # + [markdown] id="SpO0wWpUe5YJ" # ## Below is another merge sort that uses the merge from the textbook Guttag. The merge sort is iterative because there is no recursion. Both functions merge and merge sort are iterative because they don't use recursive. # ### The merge function below uses only the left, right and compare as its parameters. Unlike the above merge which uses an array, left, right, and middle for its parameters. # + id="rz2V7NCeX_4U" def merge_sort2(List): size = 1 while size < len(List) -1: left = 0 while left < len(List)-1: middle = min((left + size -1), (len(List)-1)) right = ((2*size + left-1, len(List)-1)[2*size + left-1 > len(List)-1]) merge2(left, right, compare = lambda x, y: x ", "photoUrl": "", "userId": "14614405936594177066"}} from google.colab import drive drive.mount('/content/a') # + id="TCEmmFvyRc4e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8c6da496-79e5-4bc8-88c7-ab1830156d05" executionInfo={"status": "ok", "timestamp": 1556947826264, "user_tz": -600, "elapsed": 26338, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} import pandas as pd from time import time import numpy as np from IPython.display import Image import keras import keras.backend as K from keras.engine.topology import Layer, InputSpec from keras.layers import Dense, Input, LSTM, RepeatVector from keras.models import Model from keras.optimizers import SGD from keras import callbacks from keras.initializers import VarianceScaling from keras.utils import plot_model from keras.callbacks import ModelCheckpoint,EarlyStopping from sklearn.cluster import KMeans, DBSCAN # + id="9GF92xvGVUhH" colab_type="code" colab={} filePath = 'a/My Drive/Research Data/TOP/probData/Prob_weekendaft.csv' df_prob = pd.read_csv(filePath) # df_prob # + id="wMElhxJTVXa6" colab_type="code" colab={} label = df_prob.iloc[:, 0] feature_input = df_prob.iloc[:, 1:] # + id="yGVbHNgXVVEq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="843c9dbd-f808-45e2-a546-8c2072a8b446" executionInfo={"status": "ok", "timestamp": 1556799157242, "user_tz": -600, "elapsed": 834, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} dims = [feature_input.shape[-1], 300, 500, 2000, 10] init = VarianceScaling(scale=1. / 3., mode='fan_in', distribution='uniform') pretrain_optimizer = SGD(lr=1, momentum=0.9) pretrain_epochs = 300 batch_size = 256 # + id="-_SlWGh1RxII" colab_type="code" colab={} def autoencoder(dims, act='relu', init='glorot_uniform'): """ Fully connected auto-encoder model, symmetric. Arguments: dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer. The decoder is symmetric with encoder. 
So number of layers of the auto-encoder is 2*len(dims)-1 act: activation, not applied to Input, Hidden and Output layers return: (ae_model, encoder_model), Model of autoencoder and model of encoder """ n_stacks = len(dims) - 1 # input input_feature = Input(shape=(dims[0],), name='input') x = input_feature # internal layers in encoder for i in range(n_stacks-1): x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x) # hidden layer encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here x = encoded # internal layers in decoder for i in range(n_stacks-1, 0, -1): x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x) # output x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x) decoded = x return Model(inputs=input_feature, outputs=decoded, name='AE'), Model(inputs=input_feature, outputs=encoded, name='encoder') # + id="GmrI0eSfYecb" colab_type="code" colab={} autoencoder, encoder = autoencoder(dims, init=init) # + id="2oW2kE3-Yxv5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 976} outputId="54ca9915-fc87-41e8-c506-41345d177131" executionInfo={"status": "ok", "timestamp": 1556777558836, "user_tz": -600, "elapsed": 1651, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} plot_model(autoencoder, to_file='autoencoder.png', show_shapes=True) Image(filename='autoencoder.png') # + id="Kk73sHttZdu5" colab_type="code" colab={} autoencoder.compile(optimizer=pretrain_optimizer, loss='mse') autoencoder.fit(feature_input, feature_input, batch_size=batch_size, epochs=pretrain_epochs, verbose = 2) #, callbacks=cb) autoencoder.save_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/ae_weights.h5') # + id="iAmcUeMDZ5Y1" colab_type="code" colab={} autoencoder.load_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/ae_weights.h5') # + id="9xRMQ8effx3O" colab_type="code" colab={} y_pred = encoder.predict(feature_input) # + [markdown] id="-LeWUnHRBPok" colab_type="text" # # **Kmeans** # + id="a1dnYvIzf2Zb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d45845eb-c2db-4ad5-ed92-03cbaeec3a32" executionInfo={"status": "ok", "timestamp": 1556776011861, "user_tz": -600, "elapsed": 841, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} kmeans = KMeans(n_clusters=5, random_state=0).fit(y_pred) kmeans.labels_ # + id="EBLZq1YMgPUX" colab_type="code" colab={} result = pd.Series(kmeans.labels_) df_result = label.to_frame() df_result.loc[: ,"result"] = result # + id="9dCD69nTgT1D" colab_type="code" colab={} df_result.to_csv('a/My Drive/Research Data/TOP/AutoEncoder Clustering/result/AE+Kmeans/Cluster_label_weekdaymor.csv') # + id="faPtgDamgo1c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="2250730b-d2d1-4d6f-a3cd-5ef31da22691" executionInfo={"status": "ok", "timestamp": 1556769192837, "user_tz": -600, "elapsed": 336829, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} prob_dict = {} for cluster_num in range(5): temp_list = np.zeros(shape=500) df_temp = df_result.loc[df_result.result==cluster_num] num_of_device = df_temp.shape[0] print(num_of_device) for marker in df_temp.label.values.tolist(): temp_device = df_prob.loc[df_prob.label==marker].iloc[0, 1:].values temp_list = temp_list + temp_device prob_dict[str(cluster_num)] = temp_list/num_of_device 
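# at this point prob_dict maps each cluster id to the mean 500-bin probability profile of the devices assigned to that cluster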
df_cluster_prob = pd.DataFrame(prob_dict) df_cluster_prob.T.to_csv('a/My Drive/Research Data/TOP/AutoEncoder Clustering/result/AE+Kmeans/Cluster_prob_weekdaymor.csv') # + id="vqdfgc3Vmhbv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="e315b451-1d92-4651-8e27-d84782e149da" executionInfo={"status": "ok", "timestamp": 1556701352490, "user_tz": -600, "elapsed": 789, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} df_cluster_prob.T # + [markdown] id="8lZ-Rsn9Bbrz" colab_type="text" # # **DBSCAN** # + id="DK4Di7emBa9f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c313a1a3-ea24-496d-947e-3de2249b5bf7" executionInfo={"status": "ok", "timestamp": 1556777891872, "user_tz": -600, "elapsed": 318739, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} dbscan = DBSCAN(eps=0.5, min_samples=7).fit(y_pred) dbscan.labels_ # + id="6eQfzVcgEe5s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="126d25dd-136e-4b2a-9b43-56057897b62c" executionInfo={"status": "ok", "timestamp": 1556777891873, "user_tz": -600, "elapsed": 318497, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} a = set(dbscan.labels_.tolist()) a # + id="wvjP2WqoCxoh" colab_type="code" colab={} result = pd.Series(dbscan.labels_) df_result = label.to_frame() df_result.loc[: ,"result"] = result df_result.to_csv('a/My Drive/Research Data/TOP/AutoEncoder Clustering/result/AE+DBSCAN/Cluster_label_weekendaft.csv') # + id="1UZd38-XC17r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="1a4f9a6c-486a-433a-deff-a8097c35b69b" executionInfo={"status": "ok", "timestamp": 1556777896967, "user_tz": -600, "elapsed": 323072, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} prob_dict = {} for cluster_num in a: temp_list = np.zeros(shape=500) df_temp = df_result.loc[df_result.result==cluster_num] num_of_device = df_temp.shape[0] print(num_of_device) for marker in df_temp.label.values.tolist(): temp_device = df_prob.loc[df_prob.label==marker].iloc[0, 1:].values temp_list = temp_list + temp_device prob_dict[str(cluster_num)] = temp_list/num_of_device df_cluster_prob = pd.DataFrame(prob_dict) df_cluster_prob.T.to_csv('a/My Drive/Research Data/TOP/AutoEncoder Clustering/result/AE+DBSCAN/Cluster_prob_weekendaft.csv') # + [markdown] id="OLATSTwXBUE1" colab_type="text" # # **LSTM Test** # + id="Mw5k5NWPaAA9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="2820f37e-c1c2-4eaf-8bdf-eec0393bae7a" executionInfo={"status": "ok", "timestamp": 1556799163527, "user_tz": -600, "elapsed": 1152, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} latent_dim = 10 timesteps = 500 input_dim = 10 inputs = Input(shape=(timesteps, input_dim)) encoded = LSTM(latent_dim)(inputs) decoded = RepeatVector(timesteps)(encoded) decoded = LSTM(input_dim, return_sequences=True)(decoded) sequence_autoencoder = Model(inputs, decoded) encoder = Model(inputs, encoded) encoder.summary() sequence_autoencoder.summary() # + id="inPPzTpjECje" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d406053c-f071-49e5-9685-eef6f1e0910b" executionInfo={"status": "ok", "timestamp": 1556799180935, "user_tz": -600, "elapsed": 854, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} print(feature_input.shape) 
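# add a trailing feature axis: each 500-step probability series becomes a (timesteps, features) sequence
# (note: the sequence autoencoder above was declared with input_dim = 10, so this (n, 500, 1) input only fits if input_dim is set to 1)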
lstm_input = feature_input.values[:, :, np.newaxis] print(lstm_input.shape) # + id="DipeV8DiDJIF" colab_type="code" colab={} adam_opti = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) sequence_autoencoder.compile(optimizer=adam_opti, loss='mse') sequence_autoencoder.fit(lstm_input, lstm_input, batch_size=batch_size, epochs=pretrain_epochs, verbose = 2) #, callbacks=cb) sequence_autoencoder.save_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/lstm_weights.h5') # + id="GHMD52ljdmF3" colab_type="code" colab={} sequence_autoencoder.load_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/lstm_weights.h5') y_pred = encoder.predict(lstm_input) # + [markdown] id="e9VPR5Ad5_Jb" colab_type="text" # # **LSTM script** # + id="GRA01eeU58l4" colab_type="code" colab={} input_file_path = 'a/My Drive/Research Data/TOP/probData/LSTM/Prob/Prob_{}_{}.csv' kmeans_out = 'a/My Drive/Research Data/TOP/probData/LSTM/LSTM+Kmeans/{}_LSTM_Kmeans_{}_{}.csv' dbscan_out = 'a/My Drive/Research Data/TOP/probData/LSTM/LSTM+DBSCAN/{}_LSTM_DBSCAN_{}_{}.csv' weekList = ['weekday_mor', 'weekday_aft', 'weekend_mor', 'weekend_aft'] monthList = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct'] cluster_num = [5]#[6, 7] eps_List = [0.2, 0.3, 1.3] porb_in_Path = 'a/My Drive/Research Data/TOP/probData/M10/{}.csv' # + id="ccSERoxn9lTa" colab_type="code" colab={} def auto_LSTM(input_file_path, monthList, weekName): dfList = [] for monthName in monthList: df_temp = pd.read_csv(input_file_path.format(monthName, weekName), index_col=0) dfList.append(df_temp) lstm_input = np.empty([2717, 10, 500]) for ii in range(2717): for iii in range(10): lstm_input[ii, iii] = dfList[iii].iloc[ii, :].values lstm_input = lstm_input.transpose((0, 2, 1)) # create model latent_dim = 10 timesteps = 500 input_dim = 10 inputs = Input(shape=(timesteps, input_dim)) encoded = LSTM(latent_dim)(inputs) decoded = RepeatVector(timesteps)(encoded) decoded = LSTM(input_dim, return_sequences=True)(decoded) sequence_autoencoder = Model(inputs, decoded) encoder = Model(inputs, encoded) encoder.summary() sequence_autoencoder.summary() pretrain_epochs = 200 batch_size = 256 adam_opti = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) sequence_autoencoder.compile(optimizer=adam_opti, loss='mse') sequence_autoencoder.fit(lstm_input, lstm_input, batch_size=batch_size, epochs=pretrain_epochs, verbose = 2) #, callbacks=cb) sequence_autoencoder.save_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/lstm_weights.h5') sequence_autoencoder.load_weights('a/My Drive/Research Data/TOP/AutoEncoder Clustering/lstm_weights.h5') y_pred = encoder.predict(lstm_input) return y_pred def auto_kmeans(y_pred, cluster_core, label_output, prob_output, label, df_prob): kmeans = KMeans(n_clusters=cluster_core, random_state=0).fit(y_pred) result = pd.Series(kmeans.labels_) df_label = label.to_frame() df_label.loc[: ,"result"] = result df_label.to_csv(label_output) prob_dict = {} for cluster_num in range(cluster_core): temp_list = np.zeros(shape=500) df_temp = df_label.loc[df_label.result==cluster_num] num_of_device = df_temp.shape[0] print(num_of_device) for marker in df_temp.street_marker.values.tolist(): temp_device = df_prob.loc[df_prob.street_marker==marker].iloc[0, 1:].values temp_list = temp_list + temp_device prob_dict[str(cluster_num)] = temp_list/num_of_device df_cluster_prob = pd.DataFrame(prob_dict) df_cluster_prob.T.to_csv(prob_output) 
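# auto_dbscan mirrors auto_kmeans, but clusters the encoded features with DBSCAN instead of KMeans; label -1 collects the points DBSCAN treats as noise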
def auto_dbscan(y_pred, eps_val, label_output, prob_output, label, df_prob): dbscan = DBSCAN(eps=eps_val, min_samples=7).fit(y_pred) a = set(dbscan.labels_.tolist()) result = pd.Series(dbscan.labels_) df_label = label.to_frame() df_label.loc[: ,"result"] = result df_label.to_csv(label_output) prob_dict = {} for cluster_num in a: temp_list = np.zeros(shape=500) df_temp = df_label.loc[df_label.result==cluster_num] num_of_device = df_temp.shape[0] print(num_of_device) for marker in df_temp.street_marker.values.tolist(): temp_device = df_prob.loc[df_prob.street_marker==marker].iloc[0, 1:].values temp_list = temp_list + temp_device prob_dict[str(cluster_num)] = temp_list/num_of_device df_cluster_prob = pd.DataFrame(prob_dict) df_cluster_prob.T.to_csv(prob_output) # + id="3Xm7JO_-_yQN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 29770} outputId="fe658c94-f05d-4602-b65a-03a1a060c097" executionInfo={"status": "ok", "timestamp": 1556954005327, "user_tz": -600, "elapsed": 6151844, "user": {"displayName": "", "photoUrl": "", "userId": "14614405936594177066"}} for weekName in weekList: reference_path = porb_in_Path.format(weekName) print("Start reading file: " + reference_path) df_prob = pd.read_csv(reference_path) label = df_prob.iloc[:, 0] feature_input = df_prob.iloc[:, 1:] y_pred = auto_LSTM(input_file_path, monthList, weekName) for cluster_core in cluster_num: kmeans_label_output = kmeans_out.format('label', weekName, cluster_core) kmeans_prob_output = kmeans_out.format('prob', weekName, cluster_core) auto_kmeans(y_pred, cluster_core, kmeans_label_output, kmeans_prob_output, label, df_prob) for eps_val in eps_List: dbscan_label_output = dbscan_out.format('label', weekName, eps_val) dbscan_prob_output = dbscan_out.format('prob', weekName, eps_val) auto_dbscan(y_pred, cluster_core, dbscan_label_output, dbscan_prob_output, label, df_prob) print("Finish processing file: " + reference_path) # + id="hrXHa7Ab8udS" colab_type="code" colab={} # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Atmospheric Toolbox](https://atmospherictoolbox.org/media/filer_public_thumbnails/filer_public/6d/35/6d35dffd-43f1-43ec-bff6-5aa066c8aabc/toolbox-header.jpg__1080x416_q85_subsampling-2.jpg) # # # Atmospheric Toolbox - HARP comparison # # This practical will show you how to compare Sentinel-5P satellite data against ground based data by making use of the [ESA Atmospheric Toolbox](https://atmospherictoolbox.org/). # In this exercise we will be focusing primarily on HARP as the toolset to do this. # We will use Sentinel-5P Level2 NO2 data and compare this against both a MAXDOAS and Pandora instrument that is located in Athens, Greece. # Both MAXDOAS and Pandora are DOAS instruments. MAXDOAS is a MAXDOAS type instrument and Pandora uses the DirectSun approach. # # You can find an explanation on the different measurement techniques on the [FRM4DOAS website](https://frm4doas.aeronomie.be/index.php/project-overview/doas) # ![doas-techniques](https://frm4doas.aeronomie.be/ProjectDir/doasinstruments.png) # # The main difference to be aware of is the altitude range for which the measurements are applicable. # # The MAXDOAS measurements only provide information on the troposphere, so we will use this data to compare against the tropospheric NO2 column information from S5P (`tropospheric_NO2_column_number_density` variable). 
# # The Pandora measurements, on the other hand, provide information on the total column. So we will use that data to compare against the total NO2 column from S5P (`NO2_column_number_density` variable). # For this exercise we will look at data from February 2020. # # The Sentinel-5P data was retrieved from the [Sentinel-5P Pre-Operations Data Hub](https://s5phub.copernicus.eu/dhus/#/home). # # The MAXDOAS data was retrieved from [NDACC](http://www.ndaccdemo.org) and the Pandora data from [Pandonia](https://www.pandonia-global-network.org). Both datasets are also available through [EVDC](https://evdc.esa.int). # ## S5P vs. MAXDOAS NO2 comparison # # In order to perform a comparison for the full month of February 2020, we would need a full month of Sentinel-5P data. # Even if we already filter for only those products that have data over Athens, we would still end up with about 48 orbits (note that, because orbits slightly overlap, we have multiple overpasses within a single day for some days). # # Since we are only interested in the data around Athens, we ideally don't want keep the full 450MB for each L2 product, but only the satellite data around the area. # # A convenient first step is therefore to create so-called _overpass files_. We can do this with HARP, by providing a geographic filter on the location of the MAXDOAS instrument, which is at 38.05 latitude and 23.86 longitude. # # As an example we will perform such a filter on the NO2 data from the regridding exercise from yesterday (which was data from 15 September 2020). import csv import datetime import harp import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import Normalize import cartopy.crs as ccrs filename = "../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T002200_20200915T020329_15147_01_010302_20200916T170359.nc" # since the area_covers_point filter is quite slow, we add some explicit filters # on latitude (which is fast) to already exclude a large part of the product operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" try: overpass = harp.import_product(filename, operations) except harp.NoDataError: print('No overpasses found') # We see that this product did not contain any matches. If that happens you get an error which you can catch using this `try`/`catch` approach. # # If we try this filter for a product that actually does contain an overpass we get: filename = "../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc" operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" try: overpass = harp.import_product(filename, operations) except harp.NoDataError: print('No overpasses found') print(overpass) # You can see that we only got one measurement for each variable. # # Instead of reading this data in Python, we actually want to have this data stored as a file on disk. # This allows us to reuse it later as input for our comparisons (and we can then throw away the original L2 products). # # To do this we could use a combination of `harp.import_product()` and `harp.export_product()` in Python. # However, it is actually faster to call the `harpconvert` tool from the command line. # # You can call command line tools from within a python notebook by prefixing the command with a `!`. 
# This is an IPython features that is described in the [documentation](https://ipython.readthedocs.io/en/stable/interactive/python-ipython-diff.html#shell-assignment). # We will use this several times in this exercise. # # Be aware that the commands that we will execute are Linux-style commands which will work on Linux and macOS, but may not work on Windows (without some modification to path references and/or usage of quotes). # # To convert the product using `harpconvert` we can use: # !harpconvert -a "latitude>36;latitude<40;area_covers_point(38.05, 23.86)" ../eodata/sentinel5p/no2/2020/09/15/S5P_OFFL_L2__NO2____20200915T103056_20200915T121226_15153_01_010302_20200917T040857.nc s5p_l2_no2_15153_athens.nc # And we can then read in this overpass file in Python using: overpass = harp.import_product("s5p_l2_no2_15153_athens.nc") print(overpass) # Note that the product contains a `history` attribute that shows how HARP generated the file. # HARP will include such history information in each file that it writes, which is very useful for traceability. print(overpass.history) # For the month of February we already created such overpass files for you, which are available in the `../eodata/sentinel5p/overpass/no2/athens` directory. These files are actually the official overpass files that are used by the [Sentinel-5P Mission Performance Center Validation Facility](http://mpc-vdaf.tropomi.eu). # # These files contain not just the pixel over Athens itself, but also a range of pixels around that area. This allows the validation experts to investigate other criteria such as the spatial homogeneity of the data. filename = "../eodata/sentinel5p/overpass/no2/athens/S5P_OFFL_L2VONO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211_athens.nc" overpass_11932 = harp.import_product(filename) print(overpass_11932) # As you can see from the `history` attribute this overpass file was just a filtering of the original proudct using a polygon area; no other HARP operations were performed. # We can use the `harp_l2geoscatterplot` from the regridding exercise from yesterday to plot this overpass data: def harp_l2geoscatterplot(product, value, colorrange=None, colortable='jet', size=1): variable = product[value] if colorrange is not None: vmin, vmax = colorrange else: vmin = np.nanmin(variable.data) vmax = np.nanmax(variable.data) fig=plt.figure(figsize=(20, 10)) ax = plt.axes(projection=ccrs.PlateCarree()) img = plt.scatter(product.longitude.data, product.latitude.data, c=variable.data, vmin=vmin, vmax=vmax, cmap=colortable, s=size, transform=ccrs.PlateCarree()) ax.coastlines() cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.04, pad=0.1) cbar.set_label(f'{variable.description} [{variable.unit}]') cbar.ax.tick_params(labelsize=14) plt.show() harp_l2geoscatterplot(overpass_11932, 'tropospheric_NO2_column_number_density', colorrange=(0,0.0001), size=30) # Now that we have the satellite data, we can start collocating the data with the MAXDOAS data. # # What we want, is to know which satellite measurements match up in time and space with which MAXDOAS measurements. # # The `harpcollocate` command line tool is designed to answer this question. It will take distance criteria on e.g. time and space and produce a list of pairs of measurements where the satellite and reference data match. # # You can get a quick help reference by passing the `--help` argument to the harpcollocate tool. 
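# As an aside, the same overpass extraction can also be done without leaving Python, using the `harp.import_product()` / `harp.export_product()` combination mentioned earlier (a minimal sketch; the output filename is illustrative):

# +
operations = "latitude>36;latitude<40;area_covers_point(38.05, 23.86)"
product = harp.import_product(filename, operations)   # `filename` still points at the orbit 15153 L2 product
harp.export_product(product, "s5p_l2_no2_15153_athens_py.nc")
# -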
# !harpcollocate --help # As a time distance criterium we are interested in measurements that are only half an hour apart. And for the spatial distance, we are only interested on satellite pixels that are directly over the MAXDOAS instrument. # # The command with this criteria will then be: # !harpcollocate -d "datetime 0.5 [h]" --point-in-area-yx ../eodata/sentinel5p/overpass/no2/athens ../eodata/groundbased/maxdoas/athens collocations_maxdoas_full.csv # This command produced a `csv` file called `collocations_maxdoas_full.csv` that contains the matching pairs. with open('collocations_maxdoas_full.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') for row in csv_reader: print(', '.join(row)) # What you will see on each line is: # - a unique identifier of the collocation pair (the `collocation_index`) # - a reference to the satellite product # - an index of the measurement within the satellite product # - a reference to the maxdoas product # - an index of the measurement within the maxdoas product # - the distance (in time) between the two measurements # # Note that the reference to the satellite product is the orginal L2 product. Also, the 'index' of the satellite measurement is the index of the pixel in the original L2 product (this index value is stored as an `index` variable in the overpass file). # The advantage of this, is that you can get the measurement directly from the original L2 product again without having to have access to the overpass file. # All the operations we perform below on the overpass files using this collocation result file can actually also still be performed on the original L2 products as well. This makes it easy to share a collocation result file with someone else who doesn't have your overpass files. That person can then download the original products and use the collocation result file to quickly extract the collocated measurements. # We can see in the list that sometimes the same satellite measurement appears twice (e.g. `S5P_OFFL_L2__NO2____20200201T094106_20200201T112236_11932_01_010302_20200204T143211.nc` measurement `1450403`). This is because within the the given half hour time distance there are multiple MAXDOAS measurements that match that criteria. # # We can instruct HARP to only take the nearest MAXDOAS measurement in that case by providing the `-nx datetime` option to `harpcollocate`. # Also, the collocations that we produced were actually not filtered for quality. We actually only want measurements that are 'of good quality'. # For the S5P data this means applying the `tropospheric_NO2_column_number_density_validity > 75` filter (as we have seen in the exercise from yesterday) and for MAXDOAS we are only going to filter out NaN values (which can be done using the `valid(tropospheric_NO2_column_number_density)` filter. # # We can pass these filters as part of the `harpcollocate` command line using the `-aa` and `-ab` parameters. # If we add the `-nx` and `-aa` and `-ab` options we get: # !harpcollocate -d "datetime 0.5 [h]" --point-in-area-yx -nx datetime -aa "tropospheric_NO2_column_number_density_validity > 75" -ab "valid(tropospheric_NO2_column_number_density)" ../eodata/sentinel5p/overpass/no2/athens ../eodata/groundbased/maxdoas/athens collocations_maxdoas.csv # Now that we know which measurements pair up, we need to filter both the satellite data and the MAXDOAS data to provide us the data for those pairs. # # We do this by using the `collocate_left()` and `collocate_right()` HARP operations. 
The `collocate_left()` filters based on the information that is on the _left_ for each pair (i.e. the satellite data) and `collocate_right()` the information that is on the _right_ (i.e. the MAXDOAS data). # # In addition, we need to add several other operations that allow us to make sure that variables have the same units for both the satellite and maxdoas data. # + filepattern = "../eodata/sentinel5p/overpass/no2/athens/*" operations = ';'.join([ 'collocate_left("collocations_maxdoas.csv")', 'derive(datetime {time} [days since 2000-01-01])', 'derive(tropospheric_NO2_column_number_density [Pmolec/cm2])', 'derive(tropospheric_NO2_column_number_density_uncertainty {time} [Pmolec/cm2])', 'sort(collocation_index)', ]) s5p = harp.import_product(filepattern, operations) filepattern = "../eodata/groundbased/maxdoas/athens/*" operations = ';'.join([ 'collocate_right("collocations_maxdoas.csv")', 'derive(datetime {time} [days since 2000-01-01])', 'derive(tropospheric_NO2_column_number_density [Pmolec/cm2])', 'derive(tropospheric_NO2_column_number_density_uncertainty {time} [Pmolec/cm2])', 'sort(collocation_index)', ]) maxdoas = harp.import_product(filepattern, operations) # - # You will see that the imported s5p and maxdoas data now contain the same amount of measurements. And by sorting both datasets by the `collocation_index` we make sure that all the measurements are nicely aligned. print(s5p) print(maxdoas) # We can now plot the s5p and maxdoas data side-by-side # + fig = plt.figure(figsize=(20, 10)) plt.title("S5P vs. MAXDOAS - February 20202", fontsize=20) t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in s5p.datetime.data] plt.errorbar(t, s5p.tropospheric_NO2_column_number_density.data, yerr=s5p.tropospheric_NO2_column_number_density_uncertainty.data, fmt='o', capsize=5, label="s5p") t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in maxdoas.datetime.data] plt.errorbar(t, maxdoas.tropospheric_NO2_column_number_density.data, yerr=maxdoas.tropospheric_NO2_column_number_density_uncertainty.data, fmt='o', capsize=5, label="maxdoas") fig.autofmt_xdate() plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylabel(f'{s5p.tropospheric_NO2_column_number_density.description} [{s5p.tropospheric_NO2_column_number_density.unit}]', fontsize=16) fig.legend(loc='right', prop={'size': 14}) plt.show() # - # We can also plot the difference. This can be done using: # + fig = plt.figure(figsize=(20, 10)) plt.title("S5P vs. MAXDOAS - February 20202", fontsize=20) t = [datetime.datetime(2000,1,1) + datetime.timedelta(x) for x in s5p.datetime.data] diff = s5p.tropospheric_NO2_column_number_density.data - maxdoas.tropospheric_NO2_column_number_density.data # propagate uncertainty by taking the squared sum of the invidual uncertainties of s5p and maxdoas err = np.sqrt(s5p.tropospheric_NO2_column_number_density_uncertainty.data**2 + maxdoas.tropospheric_NO2_column_number_density_uncertainty.data**2) plt.errorbar(t, diff, yerr=err, fmt='o', capsize=5, label="s5p - maxdoas") fig.autofmt_xdate() plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylabel(f'tropospheric NO2 difference (S5P-MAXDOAS) [{s5p.tropospheric_NO2_column_number_density.unit}]', fontsize=14) plt.show() # - # This comparison is actually also available on the [Validation Server](https://mpc-vdaf-server.tropomi.eu) that is used by the S5P Mission Performance center. # # You can compare the results by looking at the [report for NO2 MAXDOAS for Athens](https://mpc-vdaf-server.tropomi.eu/no2/no2-offl-maxdoas/athens). 
If, on that page, you click on the 'Select Date Range' on the top right, and then select the 'Month' results for 2020 Feb, you should see the same results as in the plots above. # ## S5P vs. Pandora NO2 comparison # # **EXERCISE**: extend this notebook by performing a comparison and creating plots for S5P vs. Pandora. # # You should use the same steps of running a `harpcollocate` and then importing the S5P and Pandora data using the `collocate_left()` and `collocate_right()` operations. The collocation criteria are the same (i.e. measurements within half an hour, Pandora points in satellite pixel area, and select nearest Pandora in time for each satellite measurement). # # Some differences to take into account: # - You need to use `NO2_column_number_density` wherever we used `tropospheric_NO2_column_number_density` # - You need to use `NO2_column_number_density_uncertainty` wherever we used `tropospheric_NO2_column_number_density_uncertainty` # - The Pandora data can be found in `../eodata/groundbased/pandora/athens` # - You will still need to filter the S5P data based on `tropospheric_NO2_column_number_density_validity` (even if we use the total column) # - The official filter for the Pandora data is: `NO2_column_number_density_validity!=1;NO2_column_number_density_validity!=2;NO2_column_number_density_validity!=11;NO2_column_number_density_validity!=12` # - Use `collocations_pandora.csv` as the filename for the collocation results # # Be aware that the collocation with Pandora takes a bit longer, because there is a lot more Pandora measurements available within a month. # # You can compare your final graphs by looking at the [report for Pandora on the Validation Server](https://mpc-vdaf-server.tropomi.eu/no2/no2-offl-pandora/athens). # Be aware that the results will be slightly different. This is because the comparison on the Validation Server uses an average of all Pandora measurements that match a satellite measurement (instead of taking the nearest in time). But this averaging is an advanced step that we will not cover in this exercise. # # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python38myenv # language: python # name: python3 # --- # default_exp loss # %load_ext lab_black # nb_black if running in jupyter # %load_ext autoreload # automatically reload python modules if there are changes in the # %autoreload 2 # hide from nbdev.showdoc import * # # Loss # # > Train and evaluate your algorithms on real data. You can also save the model for later use, or deploy it to production! # # ***input:*** clean and tidy dataset from data notebook + ML model class from hypotheses space notebook # # ***output:*** evaluated, trained and (optionally) deployed model # # ***description:*** # # In this notebook you train and evaluate ML methods implemented with the whole dataset. # You can also save the model for later use, or deploy it to production environment, or use this notebook as the final output. # # Edit this and the other text cells to describe your project. 
# ## Import relevant modules # + # export import numpy as np # your code here # - # ## Define notebook parameters # + tags=["parameters"] # This cell is tagged with 'parameters' seed = 0 # - # make direct derivations from the parameters: np.random.seed(seed) # your code here # ## Load clean and tidy dataset # + # your code here # - # > Note that depending on the file format and your variables, you might have to refefine datatypes in your dataframe! # ## Split the data into training, testing and validation data # + # your code here # - # ## Train and evaluate the models # + # your code here # - # ## Visualize the results # + # your code here # - # You can also include statistical tests! # + # your code here # - # ## Validate model (if hyperparameters are optimized) # + # your code here # - # ## Visualize validation # + # your code here # - # ## Acceptance testing & deployment # # Is the model 'good enough' for production? # # Define criterion and tests for 'good enough' and automatic model save / deployment process. # + # your code here # EXAMPLE: model.deploy() if val_loss > 0.85 else raise ModelValidationError # - # ## Conclusions # # How the results look like? # ## Output of this notebook # # Saved or deployed trained model # ## You can now move on to the workflow notebook! # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %%capture # %matplotlib notebook import numpy as np import math import matplotlib.pyplot as plot from matplotlib import ticker from sklearn.metrics import r2_score ############################################################################### # Lasso and Linear from sklearn.linear_model import Lasso,LinearRegression import matplotlib from mpl_toolkits.mplot3d import Axes3D import scipy.optimize as sciop from scipy.optimize import basinhopping # !make fortran; import irreverisble #importing the fortran mechanics routine global exp exp = [] # ***** target exp = np.loadtxt('ref/HSRS/22') def error_evaluation_rms(errors): sum_of_squares = 0 for error in errors: sum_of_squares = sum_of_squares + error**2 return ((sum_of_squares/len(errors))**(1./2.)) #incorporated division by n, which is the proper rms def mcfunc(model_parameters): # -------------- number samples, =1 in this case no_samples = 1 T_service = 22. + 273. 
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# %%capture
# %matplotlib notebook
import numpy as np
import math
import matplotlib.pyplot as plot
from matplotlib import ticker
from sklearn.metrics import r2_score
###############################################################################
# Lasso and Linear
from sklearn.linear_model import Lasso, LinearRegression
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import scipy.optimize as sciop
from scipy.optimize import basinhopping

# !make fortran
import irreverisble  # importing the fortran mechanics routine

global exp
exp = []
# ***** target
exp = np.loadtxt('ref/HSRS/22')


def error_evaluation_rms(errors):
    sum_of_squares = 0
    for error in errors:
        sum_of_squares = sum_of_squares + error**2
    # incorporated division by n, which is the proper rms
    return (sum_of_squares / len(errors)) ** (1. / 2.)


def mcfunc(model_parameters):
    # -------------- number of samples, = 1 in this case
    no_samples = 1
    T_service = 22. + 273.
    prec_stress = 0
    SS_stress = 750
    strain_stress, WTN = irreverisble.mechanics(prec_stress, SS_stress, T_service, model_parameters, no_samples)
    strain_stress = np.array(np.trim_zeros(strain_stress)).reshape(-1, 2)
    # print strain_stress
    # ----------------------------
    cal_val = []
    errors = []
    # traverse the experimental data points
    for iexp, data in enumerate(exp[:, 0]):
        # find the nearest neighbours that surround each data point, and use them to determine the error
        for ical, data in enumerate(strain_stress[:, 0]):
            ical = ical - 1  # may or may not be advantageous to keep this instead of the range attribute, to save memory
            left_strainpoint = strain_stress[ical, 0]
            right_strainpoint = strain_stress[ical + 1, 0]
            exp_datapoint = exp[iexp, 0]
            if (exp_datapoint > left_strainpoint and exp_datapoint < right_strainpoint):

# +
# %%file sumavectpth.c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/time.h>

void print_vector(float *v, int size) {
    printf("\n");
    for (int i = 0; i < size; i++) {
        printf("%f\n", v[i]);
    }
    printf("\n");
}

void init_vector(float *v, int size) {
    for (int i = 0; i < size; i++) {
        v[i] = (float)i;
    }
}

double calcularsegundos(struct timeval ti, struct timeval tf) {
    return (tf.tv_sec - ti.tv_sec) + (tf.tv_usec - ti.tv_usec) / 1000000.0;
}

typedef struct th_info {
    pthread_t thread;
    int size;
    int id;
    float *v1;
    float *v2;
    float *v3;
} th_info;

void *pth_func(void *args) {
    th_info *th_info_args = (th_info *)args;
    for (int i = 0; i < th_info_args->size; i++) {
        th_info_args->v3[i] = th_info_args->v1[i] + th_info_args->v2[i];
    }
    return NULL;
}

void parallel_vector_sum(float *v1, float *v2, float *v3, int tam, int ths) {
    int size = (tam / ths);
    th_info threads[ths];
    for (int i = 0; i < ths; i++) {
        threads[i].size = size;
        threads[i].id = i;
        threads[i].v1 = v1 + i * size;
        threads[i].v2 = v2 + i * size;
        threads[i].v3 = v3 + i * size;
    }
    pth_func((void *)&threads[0]);
    if (ths > 1) {
        for (size_t i = 1; i < ths; ++i) {
            pthread_create(&threads[i].thread, NULL, pth_func, (void *)&threads[i]);
        }
        for (size_t i = 1; i < ths; ++i) {
            pthread_join(threads[i].thread, NULL);
        }
    }
}

int main(int argc, char **argv) {
    if (argc != 3) {
        printf("Usage: %s <tam> <ths>\n", argv[0]);
        exit(-1);
    }
    struct timeval ti, tf;
    int tam = atoi(argv[1]);
    int ths = atoi(argv[2]);
    float *v1 = (float *)malloc(tam * sizeof(float));
    float *v2 = (float *)malloc(tam * sizeof(float));
    float *v3 = (float *)malloc(tam * sizeof(float));
    // initialize the vectors
    init_vector(v1, tam);
    init_vector(v2, tam);
    gettimeofday(&ti, NULL);
    parallel_vector_sum(v1, v2, v3, tam, ths);
    gettimeofday(&tf, NULL);
    double segundos = calcularsegundos(ti, tf);
    printf("%s,%s,%lf\n", argv[1], argv[2], segundos);
    free(v1);
    free(v2);
    free(v3);
    return 0;
}
# -

# ! gcc sumavectpth.c -o sumavectpth -pthread
# ! ./sumavectpth

# Bash script that will run the tests and record the results in comma-separated-value files

# +
# %%file perfil.sh
printf "n,ths,tiempo\n" > perfil.csv
for n in 5000000 10000000 50000000 100000000
do
    for th in 1 2 3 4 6 8
    do
        for i in {1..20}
        do
            ./sumavectpth $n $th >> perfil.csv
        done
    done
done
# -

# Run the bash script
# ! bash perfil.sh

# +
# pandas and matplotlib are needed for the analysis of the results below
import pandas as pd
import matplotlib.pyplot as plt
# -

# Read the CSV file
perfil = pd.read_csv('perfil.csv', delimiter=',')
perfil

# Get the column of sizes and drop the duplicated values
ns = perfil['n'].drop_duplicates().to_numpy()
ns

# Get the column with the number of threads and drop the duplicated values
ths = perfil['ths'].drop_duplicates().to_numpy()
ths
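# Before plotting, it can help to summarise the raw timings per combination of size and thread count. This cell is an illustrative addition (not part of the original profiling workflow) and assumes `perfil.csv` was generated by the script above.

# +
# mean, spread and number of repetitions for each (n, ths) pair
resumen = (
    perfil.groupby(['n', 'ths'])['tiempo']
    .agg(['mean', 'std', 'count'])
    .reset_index()
)
resumen
# -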
# Plot showing how the average time for each size varies with the number of threads, after taking a rolling median with a window of 7.

for n in ns:
    tn = []
    for th in ths:
        tn.append(perfil[(perfil['n'] == n) & (perfil['ths'] == th)]['tiempo'].rolling(7, min_periods=1).median().mean())
    plt.plot(ths, tn, '-o', label=str(n))
plt.legend()
plt.xlabel('number of threads')
plt.ylabel('average time (s)')
plt.tight_layout()

# Subplots to look in more detail at how the time varies for each size

r = 2
c = 2
for i in range(r):
    for j in range(c):
        ind = i * c + j
        plt.subplot(r, c, ind + 1)
        ts = perfil[perfil['n'] == ns[ind]].groupby('ths').mean()['tiempo'].to_numpy()
        plt.plot(ths, ts, '-o', label='n=' + str(ns[ind]))
        plt.legend()
plt.tight_layout()

# In the plots above it can be seen that, for all four sizes, the longest time did not necessarily occur when a single _thread_ was used. Nevertheless, the single-_thread_ time will be the reference for computing the _speedup_.

for n in ns:
    ts = perfil[perfil['n'] == n].groupby('ths').mean()['tiempo'].to_numpy()
    plt.plot(ths[1:], ts[0] / ts[1:], '-o', label=str(n))
plt.legend()
plt.xlabel('number of threads')
plt.ylabel('speedup')
plt.tight_layout()

# ! rm *.c
# ! rm *.sh

# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Lx0MPElRyqyB"
# In this notebook, we will use artificial neural networks and supervised machine learning to classify phases of the nearest-neighbor Ising model with ferromagnetic interactions. The training and test datasets have been generated separately.

# + [markdown] id="3-cTCMEBgtwU"
# ### Import necessary libraries

# + id="ib9-cuY2gYpC"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.keras import regularizers
from keras.utils import to_categorical

# + [markdown] id="6F7Of1TMgzmK"
# ### Read the data file for training the model.

# + id="0jbMWAKMgqrh"
ising_data = pd.read_csv('/content/drive/MyDrive/Data Science/Data_Sets/Ising/Ising_data1.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="xw5xgLzthism" outputId="13983a14-6f89-4b2c-d9c2-635d7a9008d9"
ising_data.head()

# + [markdown] id="GOvBFQKCjc2_"
# The data file contains lattice configurations obtained using Monte-Carlo simulations of the nearest-neighbour ferromagnetic Ising model. For each initial high-temperature configuration, the temperature is gradually reduced from $T/J = 4.9$ to $T/J = 0.5$ in steps of $\delta T/J = 0.1$ (number of temperature values *ntemp* = 45). At each temperature value, *n_meas* = 10 lattice configurations are saved. Finally, this process is repeated for *n_init_config* = 10 initial configurations. Thus the data file contains a total of 10 x 10 x 45 = 4500 lattice configurations (100 configurations at each temperature value). Each square lattice of 20 x 20 sites is reshaped into an array of size 400 containing values of $\pm1$, depending on whether the site contains a spin $\uparrow$ or $\downarrow$.

# + colab={"base_uri": "https://localhost:8080/"} id="LzGiLd-YgKAX" outputId="bbbb10c7-dfdd-41dc-b2c4-6ab8e827f28c"
X = ising_data.drop(['Temp','Unnamed: 0'], axis=1)
X.shape
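# The cell below is an illustrative addition: a quick sanity check that the table has the layout described above (4500 configurations of 400 spins, each spin $\pm1$), plus an example of reshaping one row back into its 20 x 20 lattice. It assumes the data file follows the description exactly.

# +
assert X.shape == (4500, 400)            # 10 initial configs x 10 measurements x 45 temperatures, 20 x 20 spins each
assert np.isin(X.values, [-1, 1]).all()  # every site is spin up (+1) or spin down (-1)

sample_lattice = X.iloc[0].values.reshape(20, 20)  # one configuration back in lattice form
sample_lattice.shape
# -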
# + [markdown] id="kyNGE9nynUMp"
# Obtain the magnetization and magnetic susceptibility for each lattice configuration.

# + id="b8QxPD95inxg"
L = 20
m = X.sum(axis=1) / L**2
m2 = ((X * X).sum(axis=1)) / L**2
temp = ising_data['Temp']
chi = (m2 - m**2) / temp
mag = pd.DataFrame({'m': m, 'chi': chi, 'Temp': temp})

# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="Q8AoF5l6ijKv" outputId="b825ebd5-49f0-4cde-d0a7-487d055c330c"
plt.figure(figsize=(12,6))
plt.subplot(121)
plt.title('Magnetic Susceptibility')
plt.xlabel('Temp')
plt.scatter(temp, chi)
plt.subplot(122)
plt.title('Magnetization')
plt.xlabel('Temp')
plt.scatter(temp, m)

# + [markdown] id="Q4qpfZDbpS8c"
# ### Critical Temperature ($T=T_c$)
#
# For each Monte-Carlo simulation, plot the magnetic susceptibility ($\chi$)
# as a function of temperature. The critical temperature $T_c$ for that particular run can be approximated as the temperature at which $\chi$ is maximum. The final $T_c$ is obtained by averaging over all these values.

# + colab={"base_uri": "https://localhost:8080/", "height": 366} id="NDwS6f1ioh69" outputId="0fa30bec-20ad-4110-f3bb-6f04d840eabb"
n_init_config = 10
n_meas = 10
thi = 4.9
tlo = 0.5
tstep = 0.1
ntemp = int((thi - tlo) / tstep) + 1
temperatures = np.linspace(thi, tlo, ntemp)

chi_config = np.zeros((ntemp, n_init_config))
m_config = np.zeros((ntemp, n_init_config))
for j in range(n_init_config):
    for i in range(ntemp):
        # note: the upper slice bound excludes the last of the n_meas configurations at each temperature
        chi_config[i, j] = np.average(mag['chi'][j*n_meas*ntemp + i*n_meas : j*n_meas*ntemp + (i+1)*n_meas - 1])
        m_config[i, j] = np.average(mag['m'][j*n_meas*ntemp + i*n_meas : j*n_meas*ntemp + (i+1)*n_meas - 1])

plt.figure()
plt.ylabel('Susceptibility')
plt.xlabel('Temperature')
for i in range(n_init_config):
    plt.scatter(temperatures, chi_config[:, i])
plt.show()

Tcs = temperatures[np.argmax(chi_config, axis=0)]
Tc = np.average(Tcs)
print('\n Critical temperatures:')
print(Tcs)
print('\n Average critical temperature = ', Tc)

# + [markdown] id="4Hfc2uEyrDpZ"
# ## Plot the average magnetization and susceptibility.

# + id="WcYEpmEZkDfB" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="c668c87b-821d-4895-b220-2616b5a964a7"
mag.sort_values(by='Temp', ascending=False, inplace=True)
# mag['Temp'].value_counts()
nvalues = n_init_config * n_meas
chi_avg = np.zeros((ntemp))
m_avg = np.zeros((ntemp))
for i in range(ntemp):
    chi_avg[i] = np.average(mag['chi'][i*nvalues:(i+1)*nvalues-1])
    m_avg[i] = np.average(abs(mag['m'][i*nvalues:(i+1)*nvalues-1]))

fig, ax1 = plt.subplots()
plt.title('Avg. Magnetization and Susceptibility')
plt.scatter(temperatures, m_avg, color='b')
plt.ylabel('<|m|>')
plt.xlabel('Temperature')
ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
plt.ylabel('<chi>')
plt.scatter(temperatures, chi_avg, color='r')
fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
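# As a quick cross-check (an illustrative addition, not part of the original analysis), the estimate from the Critical Temperature section can be compared with the exact critical temperature of the infinite 2D Ising model, $T_c/J = 2/\ln(1+\sqrt{2}) \approx 2.269$. Deviations are expected for a finite 20 x 20 lattice and the coarse temperature grid.

# +
Tc_exact = 2.0 / np.log(1.0 + np.sqrt(2.0))  # Onsager's exact result (in units of J)
print('Exact Tc            = %.3f' % Tc_exact)
print('Estimated Tc        = %.3f' % Tc)
print('Relative difference = %.1f %%' % (100 * abs(Tc - Tc_exact) / Tc_exact))
# -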
# + [markdown] id="7M5y7FNA5djv"
# ### Lattice Configurations
#
# Let us plot sample lattice configurations from the three cases:
# * High temperature ($T \gt T_c$): there is no long-range order. The spins $\uparrow$ and $\downarrow$ are randomly distributed.
# * Critical temperature ($T \approx T_c \approx 2.5$): the spins begin to demonstrate long-range order, so domains of either spin $\uparrow$ or $\downarrow$ appear.
# * Low temperature ($T \lt T_c$): all the spins are aligned either $\uparrow$ or $\downarrow$, exhibiting long-range order.

# + colab={"base_uri": "https://localhost:8080/", "height": 257} id="3T9h8Rw05beN" outputId="84878509-193f-4337-980f-22c32d3fa2cd"
# Plot T<Tc, T>Tc, and T=Tc
lattice_T_gt_Tc = ising_data.loc[ising_data['Temp']==4.0].sample().drop(['Temp','Unnamed: 0'], axis=1).values.reshape((20,20))
lattice_T_eq_Tc = ising_data.loc[ising_data['Temp']==2.5].sample().drop(['Temp','Unnamed: 0'], axis=1).values.reshape((20,20))
lattice_T_lt_Tc = ising_data.loc[ising_data['Temp']==1.0].sample().drop(['Temp','Unnamed: 0'], axis=1).values.reshape((20,20))

fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
axes[0].matshow(lattice_T_gt_Tc)
axes[0].set_title("T = 4.0 > Tc")
axes[1].matshow(lattice_T_eq_Tc)
axes[1].set_title("T = Tc = 2.5")
axes[2].matshow(lattice_T_lt_Tc)
axes[2].set_title("T = 1.0 < Tc")
for i in range(3):
    axes[i].set_xticks([])
    axes[i].set_yticks([])
plt.show()

# + [markdown] id="oY8Yf0nvKrg_"
# ### Label the training data
#
# Add a column 'Phase' and set it to 1 if $T < T_c$ (ordered phase) and 0 if $T > T_c$ (disordered phase).

# + id="uTu3thkXhkHh" colab={"base_uri": "https://localhost:8080/"} outputId="7f782442-aa8b-4ce5-f325-f940a42c700f"
ising_data['Phase'] = (ising_data['Temp']